| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 672eef1114ae17959188fe66f7ad83d243a059ec | a7122e606af8a3475412e838ba3f40754c71d33a | /programs/rgbc_rt/__init__.py | eb7064e74add5f07c403bed5dde6b23371a09336 | [] | no_license | cedrichaase/nodergb-realtime-client | 9b67313d3d7ac7aa88f9d656dc69a5ea04292304 | 06524948e53fd1551b3e2c08fb59126a18a334e1 | refs/heads/master | 2021-01-23T00:44:45.676396 | 2017-08-29T22:55:28 | 2017-08-29T22:55:28 | 92,838,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py |
from colour import Color
import socket


class RGBCRt:
    def __init__(self, address, port):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.connect((address, port))
        self.socket.settimeout(0.0333)

    @staticmethod
    def __format_color(color):
        hex_color = bytes(color.get_hex_l()[1:] + "\n", "utf8")
        return hex_color

    @staticmethod
    def __format_packet(color, host=''):
        content = "{0}{1}{2}\n".format(host, ':' if host else '', color)
        return bytes(content, "utf8")

    def set_color(self, color, host=''):
        self.socket.sendall(self.__format_packet(color, host))

    def set_timeout(self, timeout):
        self.socket.settimeout(timeout)
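
# --- Editor's usage sketch (not part of the original file) ---
# Assumes a nodergb realtime server listening for UDP packets; the host and
# port below are placeholders. set_color() expects the colour as a raw hex
# string without the leading '#'.
#   client = RGBCRt("127.0.0.1", 5000)
#   client.set_color("ff0000")
#   client.set_color(Color("green").get_hex_l()[1:], host="strip-1")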
| ["cedric@sineband.de"] | cedric@sineband.de |
| 4298b9868a6620abc4e25531e8cf53de9072537d | 87aa43e2f5247b271b8f96b5aab1315bb5eb0053 | /angr/surveyors/sser.py | 5420fdab0bea7d6f8a642fbf2595e8675fc6748d | ["BSD-2-Clause"] | permissive | pabit/angr | 5676778233fdf3a490541ba1d2c0a11e82d50686 | 2cb4a9a837d7eeaad9dc80efe8c9e4505fb31d04 | refs/heads/master | 2021-01-15T12:14:28.429885 | 2016-03-15T20:20:07 | 2016-03-15T20:20:07 | 53,982,483 | 1 | 0 | null | 2016-03-15T22:17:09 | 2016-03-15T22:17:09 | null | UTF-8 | Python | false | false | 1,962 | py |
from collections import defaultdict
import networkx
from ..surveyor import Surveyor
class Sser(Surveyor):
"""
Sser implements a _static_ symbolic execution engine!
"""
def __init__(self, project, start=None, ends=None, max_repeats=None):
Surveyor.__init__(self, project, start=start)
self._ends = ends
self._max_repeats = max_repeats
# We generate a CFG beginning from the starting point
self._cfg = self._project.CFG(
starts=(self.active[0].ip, ),
context_sensitivity_level=0,
call_depth=0
)
# Normalize it!
        self._cfg.normalize()
# Get all deadends
# We cannot directly use cfg.deadends because we want to eliminate all transitions to function
# calls and syscalls
deadends = self._deadends()
# Compute post-dominators
self._post_dominators = defaultdict(list)
for d in deadends:
post_dominators = self._cfg.immediate_postdominators(d)
            for i, j in post_dominators.items():
self._post_dominators[i].append(j)
# Create the inverse-post-dominator dict
self._inverse_post_dominators = defaultdict(set)
        for n, l in self._post_dominators.items():
for dom in l:
self._inverse_post_dominators[dom].add(n)
@property
def done(self):
return len(self.active) == 0
def tick_path(self, p):
pass
def _deadends(self):
"""
Get all deadends for self._cfg
"""
graph = networkx.DiGraph()
# Make a copy of the nodes and edges in self._cfg, but only with jumpkinds that we care about
for src, dst, data in self._cfg.graph.edges(data=True):
if data['jumpkind'] == 'Ijk_Boring':
graph.add_edge(src, dst)
deadends = [ i for i in graph.nodes() if graph.out_degree(i) == 0 ]
return deadends
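
# --- Editor's sketch (not part of the original file) ---
# The _deadends() logic on a toy graph: only 'Ijk_Boring' edges survive the
# filter, and nodes without outgoing filtered edges are deadends.
#   g = networkx.DiGraph()
#   g.add_edge('A', 'B', jumpkind='Ijk_Boring')
#   g.add_edge('B', 'C', jumpkind='Ijk_Call')   # dropped by the filter
#   kept = [(s, d) for s, d, e in g.edges(data=True) if e['jumpkind'] == 'Ijk_Boring']
#   filtered = networkx.DiGraph(kept)
#   [n for n in filtered.nodes() if filtered.out_degree(n) == 0]   # -> ['B']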
| ["fish@cs.ucsb.edu"] | fish@cs.ucsb.edu |
| 7fc599f00564e8fdb2871be5e1ec1d397631e510 | 7729b7c46f951213a7f25b264fc26fcb002c464b | /openDataSpider/gzSpider.PY | 3ace5e2c3518f2b644d23be314b6b58cbab060cb | [] | no_license | duiliuliu/openData | 882255fff2a0d039b42de26b07197db3d7ddb17b | 88a62c6996ca1410877923292996692b91cda0ec | refs/heads/master | 2021-09-25T14:53:38.225125 | 2018-10-23T00:02:48 | 2018-10-23T00:02:48 | 98,703,519 | 12 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,416 | py |
# -*- coding: utf-8 -*-
# author: pengr
from spider import Spider,Writer
import json
import requests
import re
import os
import time
import multiprocessing
'''Process the requested data'''
def func(response=None, data=None, header=None):
if response:
return handleResponse(response.text)['datasetlist']
if data:
for item in data:
item['description'] = re.sub('<.*?>','',item['description'])
if header:
myheader = {
'name':'数据目录名称',
'description':'数据摘要',
'topicName':'主题名称 ',
'orgName':'数据提供方',
'updTime':'最后更新时间',
'format':'数据下载格式',
'download_url':'文件url',
'download_file':'文件',
'calls':'调用量',
            'views': '浏览量',
            'header_sort':[
'name','description','topicName','orgName','updTime','format','download_url','download_file','calls','views'
]
}
header['myheader'] = myheader
def handleResponse(response):
response = re.sub("/\*\*/\w+\d+\(",'',response)
response = re.sub('\);','',response)
response = re.sub("'",'"',response)
return json.loads(response)['data']
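
# Editor's note (not part of the original file): handleResponse() strips a
# JSONP wrapper, e.g.
#   handleResponse('/**/jQuery123456({"data": {"total": 1}});')
# returns {'total': 1}.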
def downloadfile(data):
print('\t-------'+data['id'][0])
timeout = 3
download_url = []
download_file = []
for id in data['id']:
url = 'http://www.gzdata.gov.cn/dataopen/api/filedata/{}?callback=jQuery1113020766250509768724_1529302845176&_=1529302845184'.format(id)
try:
response = requests.get(url,timeout=timeout)
res = handleResponse(response.text)
download_file.append(res['remark'])
url = 'http://www.gzdata.gov.cn/dataopen/api/url/{}?callback=jQuery1113006382656167261302_1529303313503&_=1529303313512'.format(res['shortUrl'])
try:
response = requests.get(url)
download_url.append(handleResponse(response.text)['realUrl'])
except Exception as e:
raise e
except Exception as e:
print(e)
dir = os.getcwd()+'/source/gz/'+data['name']
if not os.path.exists(dir):
os.mkdir(dir)
data['download_url'] = ' '.join(download_url)
for url,file in zip(download_url,download_file):
res = requests.get(url)
with open(dir+'/'+file,'wb+') as f:
f.write(res.content)
if __name__ == '__main__':
    '''Guizhou: 161 pages in total, 61 pages of files'''
page = 161
urls = []
for pageNo in range(1,page):
        ''' dataType= (empty) all, dataType=0 files, dataType=1 APIs, dataType=3 apps '''
url = "http://www.gzdata.gov.cn/dataopen/api/dataset?callback=jQuery1113095454735099338_1512229270187&pageNo="+str(pageNo)+"&pageSize=10&order=0&topicId=&orgId=&name=&dataType=0&_=1512229270189"
urls.append(url)
gzSpider = Spider.Spider()
gzSpider(urls,method = 'get', func = func)
path = os.getcwd()
dir = path+'/source/gz'
if not os.path.exists(dir):
os.mkdir(dir)
print('\n'.join(['-'*40,'\t下载全部资源','-'*40]))
pool = multiprocessing.Pool(processes = 6)
pool.map(downloadfile, gzSpider.data)
    pool.close() # Close the pool: no more tasks may be submitted to it
    pool.join() # Wait for all pool workers to finish; must be called after close()
# downloadfile(gzSpider.data[0])
# filecsv = 'source/gzdata.csv'
# Writer.writeDataCsv(gzSpider.tableHeader,gzSpider.data,filename=filecsv)
    filexlsx = 'source/gzdata.xlsx'
    Writer.writeDataExcel(gzSpider.tableHeader,gzSpider.data,filename=filexlsx)
# Writer.writeDataMongo(gzSpider.tableHeader,gzSpider.data,collection_name='db.gz_catalog')
print('\tend!')
# http://gzopen.oss-cn-guizhou-a.aliyuncs.com/%E8%B4%B5%E5%B7%9E%E7%9C%81%E6%8A%95%E8%B5%84%E4%BF%83%E8%BF%9B%E5%B1%802015%E5%B9%B4%E5%BA%A6%E9%83%A8%E9%97%A8%E5%86%B3%E7%AE%97%E5%8F%8A%E2%80%9C%E4%B8%89%E5%85%AC%E2%80%9D%E7%BB%8F%E8%B4%B9%E5%86%B3%E7%AE%97%E4%BF%A1%E6%81%AF%E5%85%AC%E5%BC%80%E8%A1%A8.xls?Expires=1809321363&OSSAccessKeyId=cRMkEl0MLhpV9l7g&Signature=3SfWDvwyUL8f9F6LpcIwcpkSwzU%3D
| ["pengrui55555@163.com"] | pengrui55555@163.com |
| 4f675dfd192b2a0c7713e71ab83d14b581992f7c | fe22e8ffdb1b2f1e11becc027e71a7a512fe56eb | /src/qcd_ntuples/make_runconfs_eventlists.py | 041aec2354a8a878e6648c58f58608c24c3f04f1 | [] | no_license | HEP-KBFI/stpol | 3cdb5dc125bb0394f4531abfdfe9629b0c8d0fa4 | 962837a3341dd26391025b9a07a9c1c93084bf64 | refs/heads/master | 2020-06-03T16:15:14.743807 | 2015-08-05T09:00:28 | 2015-08-05T09:00:28 | 5,716,481 | 0 | 1 | null | 2015-03-04T08:23:28 | 2012-09-07T12:27:30 | Python | UTF-8 | Python | false | false | 1,460 | py |
from __future__ import print_function
import sys
import os
from parse_input import *
#Monkey-patch the system path to import the stpol header
sys.path.append(os.path.join(os.environ["STPOL_DIR"], "src/headers"))
from subprocess import call
import time
data_files = {}
for iso in ["iso"]:#, "antiiso"]:
data_files[iso] = get_data_files(iso)
#print data_files
for iso in ["iso", "antiiso"]:
for dataset, fileset in data_files[iso].items():
i = 0
        print()
        print(dataset)
for (base_file, added_file) in fileset:
            print(i, base_file)
bf_name = "/tmp/andres/qcdevents_%s_%s_%d.sh" % (dataset, iso, i)
batch_outfile = open(bf_name, "w")
batch_outfile.write("#!/bin/bash\n")
batch_outfile.write("source $STPOL_DIR/setenv.sh\n")
batch_outfile.write("python $STPOL_DIR/src/qcd_ntuples/qcd_eventlists.py " +dataset+ " " +iso+" " +str(i)+" "+base_file+" " + added_file + "\n")
print "python $STPOL_DIR/src/qcd_ntuples/qcd_eventlists.py " +dataset+ " " +iso+" " +str(i)+" "+base_file+" " + added_file + "\n"
batch_outfile.close()
call(["chmod", "755", bf_name])
suc = 1
while not suc == 0:
suc = call(["sbatch", "-x comp-d-058", bf_name])
                print(bf_name, suc)
if not suc == 0:
print "XXX"
time.sleep(10)
i+=1
time.sleep(1)
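
# Editor's note (not part of the original file): each generated
# /tmp/andres/qcdevents_<dataset>_<iso>_<i>.sh batch script has the shape
#   #!/bin/bash
#   source $STPOL_DIR/setenv.sh
#   python $STPOL_DIR/src/qcd_ntuples/qcd_eventlists.py <dataset> <iso> <i> <base_file> <added_file>
# and is resubmitted via sbatch every 10 seconds until the call returns 0.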
| ["andres.tiko@cern.ch"] | andres.tiko@cern.ch |
| 1751a7f5cd497e61557f431966933b4ef1f57979 | 8d1d8e94ea364368b17f839b50f4e404ede5c5e8 | /beam_search.py | 7e882a6bd0827335e42a11825cd3cc6303991dc7 | [] | no_license | Kaixin-Wu/myTransformer | c155fd49535cf94903ff637a9429bbc081319280 | e055b470b9116b0baaded1cfa5f5660df27f87ef | refs/heads/master | 2020-03-13T03:26:29.443719 | 2018-05-02T11:25:14 | 2018-05-02T11:25:14 | 130,860,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py |
import torch
import constants
import numpy as np
class Beam(object):
    ''' Store the necessary info for beam search. '''
def __init__(self, size, cuda=False):
self.size = size
self.done = False
self.tt = torch.cuda if cuda else torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
self.all_scores = []
# The backpointers at each time-step.
self.prev_ks = []
# The outputs at each time-step.
self.next_ys = [self.tt.LongTensor(size).fill_(constants.PAD)]
self.next_ys[0][0] = constants.BOS
def get_current_state(self):
"Get the outputs for the current timestep."
return self.get_tentative_hypothesis()
def get_current_origin(self):
"Get the backpointers for the current timestep."
return self.prev_ks[-1]
def advance(self, word_lk):
"Update the status and check for finished or not."
num_words = word_lk.size(1)
# Sum the previous scores.
if len(self.prev_ks) > 0:
beam_lk = word_lk + self.scores.unsqueeze(1).expand_as(word_lk)
else:
beam_lk = word_lk[0]
flat_beam_lk = beam_lk.view(-1)
        best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, True)
self.all_scores.append(self.scores)
self.scores = best_scores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
        prev_k = best_scores_id // num_words
self.prev_ks.append(prev_k)
self.next_ys.append(best_scores_id - prev_k * num_words)
# End condition is when top-of-beam is EOS.
if self.next_ys[-1][0] == constants.EOS:
self.done = True
self.all_scores.append(self.scores)
return self.done
def sort_scores(self):
"Sort the scores."
return torch.sort(self.scores, 0, True)
def get_the_best_score_and_idx(self):
"Get the score of the best in the beam."
scores, ids = self.sort_scores()
return scores[1], ids[1]
def get_tentative_hypothesis(self):
"Get the decoded sequence for the current timestep."
if len(self.next_ys) == 1:
dec_seq = self.next_ys[0].unsqueeze(1)
else:
_, keys = self.sort_scores()
hyps = [self.get_hypothesis(k) for k in keys]
hyps = [[constants.BOS] + h for h in hyps]
dec_seq = torch.from_numpy(np.array(hyps))
return dec_seq
def get_hypothesis(self, k):
"""
Walk back to construct the full hypothesis.
Parameters.
* `k` - the position in the beam to construct.
Returns.
1. The hypothesis
2. The attention at each time step.
"""
hyp = []
for j in range(len(self.prev_ks) - 1, -1, -1):
hyp.append(self.next_ys[j+1][k])
k = self.prev_ks[j][k]
return hyp[::-1]
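
# --- Editor's usage sketch (not part of the original file) ---
# Assumes constants.PAD/BOS/EOS are defined and a vocabulary of size 10:
#   beam = Beam(5)
#   word_lk = torch.randn(5, 10)     # fake per-beam word log-likelihoods
#   done = beam.advance(word_lk)     # keep the 5 best continuations
#   beam.get_current_state()         # tentative decoded sequences so far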
| ["wukaixin_neu@163.com"] | wukaixin_neu@163.com |
| b09a90de25a746af393edfd411122bb615ebccb1 | 04b031a3f2d45be7e72c0a7c34a2b72e7ec7d136 | /CenterTest.py | fa4d7c6d74343461a7ea56e5a0f6860fb9c3559a | [] | no_license | davidwilson826/FinalProject | 5e37b624fc03826e1d1e3050866d2bcd53ad8404 | 6630b24ac310a798b380bb227d4e782244fb8bb5 | refs/heads/master | 2021-01-01T04:01:20.033229 | 2016-06-06T18:56:37 | 2016-06-06T18:56:37 | 56,407,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py |
from ggame import App, Sprite, CircleAsset, Color, LineStyle
black = Color(0x000000, 1.0)
noline = LineStyle(0.0, black)
class Thing(Sprite):
def __init__(self, asset, position):
super().__init__(asset, position)
self.fxcenter = self.fycenter = 0.5
class CenterTest(App):
def __init__(self):
super().__init__()
Thing(CircleAsset(50, noline, black), (0,0))
CenterTest().run()
| ["davidwilson@hanovernorwichschools.org"] | davidwilson@hanovernorwichschools.org |
| a89b312a6b81a5399dd456a92bdb5fd9671be1e4 | 6b75d0d0b991584691444cc5828c27db71f311d5 | /SPA_12142018_9p2GHz_yale.py | ec92ec88a64ef674a8f6bf3e63f75a80fbc2fed8 | [] | no_license | katrinasliwa/QCI_SNAIL_design | 8381f3c230e6e24444b49ff536790b598c2440fc | 93e3d5f399f3eb8994e61596a5f005ed58d82dac | refs/heads/main | 2023-06-02T16:40:47.456023 | 2021-06-21T18:17:40 | 2021-06-21T18:17:40 | 379,023,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,915 | py |
import sys
#sys.path.append('/Users/yiwenchu/Documents/circuitQED/CAD/SNAIL')
#sys.path.append('/Users/yiwenchu/Documents/python/gdspy-0.6/build/lib.macosx-10.9-x86_64-2.7/gdspy')
import os
import numpy as np
import gdspy
# import waferDefs as wd
# from toolsDefs import *
# import qubitDefs as qd
# import junctionDefs as jd
# import diceDefs as dd
# import markerDefs as md
from SNAILs import *
print('Using gdspy module version ' + gdspy.__version__)
layers = {'chipbounds_layer':0,
'dice_layer':12,
'coarse_layer':12,
'extra_coarse_layer':13,
'dolan_fine_layer':1,
'dolan_undercut_layer':2,
'dolan_bridge_layer':3,
'substrate_layer':10,
'fields_layer':11,};
nm = 0.001;
mm = 1000.0;
um = 1.0;
mil = 25.4;
inches = mil*1000;
# ------- Snail Array-------------------------
alpha = 0.1
l_j = 7.0 # big junction length
w_bridge = 0.500 # dolan bridge width
w_btwn = 2.200 #2.120 # width between bridges
n_j = 3 # number of junctions in the array
loopSize = (5.0, 7.750)
w_leadS = 1.0
underCut = 1.100
Mnew = 10 # number of snails in array
Mold = 20
w_lead = 3.0 # width of lead from edge of snails
overlap = 2.0 # overlap between snail and lumped cap (for EBPG stitching errors)
#l_tot = 70.0
l_tot = 134.5 # Vlad's M=20 array length is 232.0
oldl_tot = 232.0
#l_tot = 2000.0 # M=200 array length
#l_tot = l_C + g_C + overlap
subx = 34.0*mm #substrate dimensions
suby = 20.0*mm
chipx = 11.0*mm #chip dimensions
chipy = 5.0*mm
dicex = 250.0*um #alignment mark dimensions
dicey = 100.0*um
pump_gap = 50.0*um # gap for pump capacitor
res_width = 250.0*um #width of resonators
SPAshift = -2.8*mm #-2.8
labelshiftx = 1000.0*um #distances in from the upper right corner for labels
labelshifty = 600.0*um
Taperlength = 300.0*um
Res1length = 1000.0*um
Res2length = 1100.0*um
Res3length = 1170.0*um
Res4length = 380.0*um
Fingergap = 2.0*um
Fingerlength = 70.0*um
Sig_gap = 4.0*um
StickX = 800.0*um
dice_offset = 200.0*um
#2*Taperlength+Fingerlength+2*Fingergap+2*Res1length+l_tot
#--------Script params---------------------------
design_name = 'SPA_008_12142018_9p2GHz' #this is the name of the gds file that gets output
DoseTestBool = 1
TestStructuresBool = 1
SPABool = 1
#-----------Start drawing-----------------------------
# a single SNAIL cell
cellSnail = gdspy.Cell('SNAIL')
snail = make_snail(alpha, n_j, l_j, loopSize, w_bridge, w_btwn, underCut,
w_leadS, edgeSnail=(True, True), topLeftLead=(0.0, 0.0),
shorts=(False,False), opens=(False,False),
)
cellSnail.add(snail)
#RES_LAYER = layers['dolan_fine_layer'], UC_LAYER = layers['dolan_undercut_layer'], BRIDGE_LAYER = layers['dolan_bridge_layer']
shorts = [(False,False), (False,False), (False, False), (True,True)]
opens = [(False, False), (False, True), (True, False), (False, False)]
labels = ['', '_test1', '_test2', '_short']
# width sweep M=20
w_btwn_array = np.array([2.200])
for w in w_btwn_array:
for kk in range(len(opens)):
newcell_name_SnailArray_real = 'SPA_a%.2f_v2_M%d_w%d_uc%d_Ej%.1f'%(alpha, Mnew, w*1e3, underCut*1e3, l_j)
newcell_name_SnailArray = newcell_name_SnailArray_real + labels[kk]
newcellSnailArray = gdspy.Cell(newcell_name_SnailArray)
newsnailArray = make_snail_array(Mnew, l_tot, w_lead, alpha, n_j, l_j, loopSize,
w_bridge, w, underCut, w_leadS,
shorts=shorts[kk], opens=opens[kk],
center=(0.0, 0.0), rotation=(np.pi/2, (0,0)))
newcellSnailArray.add(newsnailArray)
for kk in range(len(opens)):
oldcell_name_SnailArray_real = 'SPA_a%.2f_v2_M%d_w%d_uc%d_Ej%.1f'%(alpha, Mold, w*1e3, underCut*1e3, l_j)
oldcell_name_SnailArray = oldcell_name_SnailArray_real + labels[kk]
oldcellSnailArray = gdspy.Cell(oldcell_name_SnailArray)
oldsnailArray = make_snail_array(Mold, oldl_tot, w_lead, alpha, n_j, l_j, loopSize,
w_bridge, w, underCut, w_leadS,
shorts=shorts[kk], opens=opens[kk],
center=(0.0, 0.0), rotation=(np.pi/2, (0,0)))
oldcellSnailArray.add(oldsnailArray)
"""
#Import resonators
#For some reason it is only importing polygons exported from MWO but not Vlad's hand-drawn polygons.
#This is ok since we want to test drawing those programmatically anyway
#res = gdspy.GdsImport('/Users/yiwenchu/Documents/circuitQED/CAD/SPA_from_vlad/resonator_2versions.gds')
gdsii = gdspy.GdsLibrary()
res1 = gdsii.read_gds('M10N7.gds')
resV1 = res1.extract('M10N7')
res2 = gdsii.read_gds('M10N2.gds')
resV2 = res2.extract('M10N2')
res3 = gdsii.read_gds('M10gap.gds')
resV3 = res3.extract('M10gap')
res4 = gdsii.read_gds('M20N7.gds')
resV4 = res4.extract('SPA_v2')
"""
#gdspy.LayoutViewer()
topCell = gdspy.Cell('topCell')
############### DRAW SPA's ############################
if SPABool:
substrate = gdspy.Rectangle((-subx/2, -suby/2), (subx/2, suby/2), layer = layers['substrate_layer'])
topCell.add(substrate)
chipCell = gdspy.Cell('chipCell')
chipCell.add(gdspy.Rectangle((-chipx/2, -chipy/2), (chipx/2, chipy/2), layer = layers['chipbounds_layer']))
chipCell.add(gdspy.Rectangle((-dicex/2, -chipy/2+dice_offset), (dicex/2, -chipy/2+dice_offset+dicey), layer = layers['dice_layer']))
chipCell.add(gdspy.Rectangle((-dicex/2, chipy/2-dice_offset), (dicex/2, chipy/2-dice_offset-dicey), layer = layers['dice_layer']))
locx = np.linspace(-chipx, chipx, 3)+0.2*mm
locy = np.linspace(-1.5*chipy, 1.5*chipy, 4)
SPA_locs = np.reshape([[(x, y) for y in locy] for x in locx], (len(locx)*len(locy),2))
#print SPA_locs
#SPA1_locs = SPA_locs[0::2]#[1:] the commented command removes the first element from the list
#SPA2_locs = SPA_locs[1::2]
#SPA1_locs = SPA_locs[[0,4]]#[1:] the commented command removes the first element from the list
#SPA2_locs = SPA_locs[1]
#SPA3_locs = SPA_locs[2]
SPA4_locs = SPA_locs[0:11]#SPA_locs[[3,5]]
# print SPA_locs
#SPA1_x = np.array([-1318*um, 1244*um])
#SPA2_x = np.array([-868*um, 794*um])
#SPA1_x = np.array([-(l_tot/2+Taperlength+2*Fingergap+Fingerlength+Res1length-2), (l_tot/2+Taperlength+Res1length-2)])
#SPA2_x = np.array([-(l_tot/2+Taperlength+2*Fingergap+Fingerlength+Res2length-2), (l_tot/2+Taperlength+Res2length-2)])
#SPA3_x = np.array([-(l_tot/2+Taperlength+Sig_gap+Res3length-2), (l_tot/2+Taperlength+Res3length-2)])
SPA4_x = np.array([-(oldl_tot/2+Taperlength+2*Fingergap+Fingerlength+Res4length-2), (oldl_tot/2+Taperlength+Res4length-2)])
'''
SPA1Cell = gdspy.Cell('SPA1')
SPA1Cell.add(gdspy.CellReference(chipCell))
# SPA1Cell.add(gdspy.CellReference(resV1))
SPA1Cell.add(gdspy.CellReference(resV1, (SPAshift, 0)))
SPA1Cell.add(gdspy.Rectangle((-chipx/2, -res_width/2),(SPA1_x[0]+SPAshift, res_width/2), layer = layers['coarse_layer']))
SPA1Cell.add(gdspy.Rectangle((SPA1_x[1]+pump_gap+SPAshift, -res_width/2),(chipx/2, res_width/2), layer = layers['coarse_layer']))
SPA1Cell.add(gdspy.Text('M10N7', 100*um, position = (chipx/2-labelshiftx, chipy/2-labelshifty), layer = layers['coarse_layer']))
#
#
SPA2Cell = gdspy.Cell('SPA2')
SPA2Cell.add(gdspy.CellReference(chipCell))
SPA2Cell.add(gdspy.CellReference(resV2, (SPAshift, 0)))
SPA2Cell.add(gdspy.Rectangle((-chipx/2, -res_width/2),(SPA2_x[0]+SPAshift, res_width/2), layer = layers['coarse_layer']))
SPA2Cell.add(gdspy.Rectangle((SPA2_x[1]+pump_gap+SPAshift, -res_width/2),(chipx/2, res_width/2), layer = layers['coarse_layer']))
SPA2Cell.add(gdspy.Text('M10N2', 100*um, position = (chipx/2-labelshiftx, chipy/2-labelshifty), layer = layers['coarse_layer']))
SPA3Cell = gdspy.Cell('SPA3')
SPA3Cell.add(gdspy.CellReference(chipCell))
SPA3Cell.add(gdspy.CellReference(resV3, (SPAshift, 0)))
SPA3Cell.add(gdspy.Rectangle((-chipx/2, -res_width/2),(SPA3_x[0]+SPAshift, res_width/2), layer = layers['coarse_layer']))
SPA3Cell.add(gdspy.Rectangle((SPA3_x[1]+pump_gap+SPAshift, -res_width/2),(chipx/2, res_width/2), layer = layers['coarse_layer']))
SPA3Cell.add(gdspy.Text('M10gap4', 100*um, position = (chipx/2-labelshiftx, chipy/2-labelshifty), layer = layers['coarse_layer']))
'''
SPA4Cell = gdspy.Cell('SPA4')
SPA4Cell.add(gdspy.CellReference(chipCell))
# SPA4Cell.add(gdspy.CellReference(resV4, (SPAshift, 0)))
SPA4Cell.add(gdspy.Rectangle((-chipx/2, -res_width/2),(SPA4_x[0]+SPAshift-StickX, res_width/2), layer = layers['extra_coarse_layer']))
SPA4Cell.add(gdspy.Rectangle((SPA4_x[0]+SPAshift-StickX, -res_width/2),(SPA4_x[0]+SPAshift, res_width/2), layer = layers['coarse_layer']))
SPA4Cell.add(gdspy.Rectangle((SPA4_x[1]+pump_gap+SPAshift, -res_width/2),(chipx/2-StickX, res_width/2), layer = layers['coarse_layer']))
SPA4Cell.add(gdspy.Rectangle((chipx/2-StickX, -res_width/2),(chipx/2, res_width/2), layer = layers['extra_coarse_layer']))
SPA4Cell.add(gdspy.Text('M20N7', 100*um, position = (chipx/2-labelshiftx, chipy/2-labelshifty), layer = layers['coarse_layer']))
'''
for loc in SPA1_locs:
topCell.add(gdspy.CellReference(SPA1Cell, loc))
#topCell.add(gdspy.CellReference(cell_name_SnailArray_real, loc))
topCell.add(gdspy.CellReference(newcell_name_SnailArray_real, np.add(loc, (SPAshift, 0))))
#for loc in SPA2_locs:
loc = SPA2_locs
topCell.add(gdspy.CellReference(SPA2Cell, loc))
topCell.add(gdspy.CellReference(newcell_name_SnailArray_real, np.add(loc, (SPAshift, 0))))
#for loc in SPA3_locs:
loc = SPA3_locs
topCell.add(gdspy.CellReference(SPA3Cell, loc))
topCell.add(gdspy.CellReference(newcell_name_SnailArray_real, np.add(loc, (SPAshift, 0))))
'''
for loc in SPA4_locs:
topCell.add(gdspy.CellReference(SPA4Cell, loc))
#topCell.add(gdspy.CellReference(cell_name_SnailArray_real, loc))
topCell.add(gdspy.CellReference(oldcell_name_SnailArray_real, np.add(loc, (SPAshift, 0))))
#draw field edges
fieldlength = 600.0*um
numyfields = np.ceil(suby/fieldlength)
numxfields = np.ceil(subx/fieldlength)
Fieldcell = gdspy.Cell('Fieldcell')
for xind in np.arange(numxfields):
fieldx = xind*fieldlength
for yind in np.arange(numyfields):
fieldy = yind*fieldlength
Fieldcell.add(gdspy.Rectangle((fieldx, fieldy), (fieldx + fieldlength, fieldy + fieldlength), layer = layers['fields_layer']))
topCell.add(gdspy.CellReference(Fieldcell, (-subx/2,-suby/2)))
'''
SPA1_locs = np.array([(5.5*mm, 0*mm),(-5.5*mm,0*mm)]) #locations for SNAILs
SPA2_locs = np.array([(5.5*mm, -5*mm),(-5.5*mm,-5*mm)])
SPA1_x = np.array([-2318*um, 1244*um]) #x coordinates for microstrips leading to SPA
SPA2_x = np.array([-868*um, 794*um])
#SPA1_x = np.array([(-3368*um, -1244*um)]) #x coordinates for microstrips leading to SPA
#SPA2_x = np.array([--3368*um, 794*um])
SPA1Cell = gdspy.Cell('SPA1Cell')
SPA1Cell.add(gdspy.CellReference(chipCell))
SPA1Cell.add(gdspy.CellReference(resV1))
SPA1Cell.add(gdspy.Rectangle((-chipx/2, -res_width/2),(SPA1_x[0], res_width/2), layer = layers['coarse_layer']))
SPA1Cell.add(gdspy.Rectangle((SPA1_x[1]+50*um, -res_width/2),(chipx/2, res_width/2), layer = layers['coarse_layer']))
SPA2Cell = gdspy.Cell('SPA2Cell')
SPA2Cell.add(gdspy.CellReference(chipCell))
SPA2Cell.add(gdspy.CellReference(resV2))
SPA2Cell.add(gdspy.Rectangle((-chipx/2, -res_width/2),(SPA2_x[0], res_width/2), layer = layers['coarse_layer']))
SPA2Cell.add(gdspy.Rectangle((SPA2_x[1]+50*um, -res_width/2),(chipx/2, res_width/2), layer = layers['coarse_layer']))
for loc in SPA1_locs:
topCell.add(gdspy.CellReference(SPA1Cell, loc))
topCell.add(gdspy.CellReference(cell_name_SnailArray_real, loc))
for loc in SPA2_locs:
topCell.add(gdspy.CellReference(SPA2Cell, loc))
topCell.add(gdspy.CellReference(cell_name_SnailArray_real, loc))
'''
####################### Draw test structures ############################
if TestStructuresBool:
# test_st_locs = np.array([(2.5*mm, -7.5*mm), (2.5*mm, 7.5*mm)])
#test_st_locs = np.array([(11.0*mm, -5*mm)])
M20test_st_locs = np.array([(6.5*mm, 7.5*mm)])
tsx = 1.5*mm
tsy = 1.0*mm
#texts = ['10 real', '10 small', '10 large', '10 short']
oldtexts = ['20 real', '20 small', '20 large', '20 short']
taper_l = 300*um
extra_l = 85*um
# array_xs = gdspy.Cell(cell_name_SnailArray_real).get_bounding_box()[:, 0]
#newpadPts = [(l_tot/2-overlap, w_lead/2),(l_tot/2-overlap+taper_l, res_width/2),(l_tot/2-overlap+taper_l+extra_l, res_width/2),
# (l_tot/2-overlap+taper_l+extra_l, -res_width/2),(l_tot/2-overlap+taper_l, -res_width/2),(l_tot/2-overlap, -w_lead/2),]
oldpadPts = [(oldl_tot/2-overlap, w_lead/2),(oldl_tot/2-overlap+taper_l, res_width/2),(oldl_tot/2-overlap+taper_l+extra_l, res_width/2),
(oldl_tot/2-overlap+taper_l+extra_l, -res_width/2),(oldl_tot/2-overlap+taper_l, -res_width/2),(oldl_tot/2-overlap, -w_lead/2),]
'''
padCell = gdspy.Cell('padCell')
padCell.add(gdspy.Polygon(newpadPts, layer = layers['coarse_layer']))
padCell.add(gdspy.Polygon(newpadPts, layer = layers['coarse_layer']).rotate(np.pi))
'''
oldpadCell = gdspy.Cell('oldpadCell')
oldpadCell.add(gdspy.Polygon(oldpadPts, layer = layers['coarse_layer']))
oldpadCell.add(gdspy.Polygon(oldpadPts, layer = layers['coarse_layer']).rotate(np.pi))
testCell = gdspy.Cell('M10testCell')
oldtestCell = gdspy.Cell('M20testCell')
# for ind, x in enumerate(np.linspace(-test_space*1.5, test_space*1.5, 4)):
for ind, x in enumerate(np.array([(0, -3.0*tsy/2.0),(0, -tsy/2),(0, tsy/2),(0,3.0*tsy/2)])):
# print cell_name_SnailArray_real + labels[ind]
'''
testCell.add(gdspy.CellReference(newcell_name_SnailArray_real + labels[ind], x))
testCell.add(gdspy.CellReference(padCell, x))
testCell.add(gdspy.Text(texts[ind], 100*um, position = (x[0]-200*um, x[1]+600*um), layer = layers['coarse_layer']))
'''
oldtestCell.add(gdspy.CellReference(oldcell_name_SnailArray_real + labels[ind], x))
oldtestCell.add(gdspy.CellReference(oldpadCell, x))
oldtestCell.add(gdspy.Text(oldtexts[ind], 100*um, position = (x[0]-200*um, x[1]+400*um), layer = layers['coarse_layer']))
'''
for loc in test_st_locs:
topCell.add(gdspy.CellReference(testCell, loc))
'''
for loc in M20test_st_locs:
topCell.add(gdspy.CellReference(oldtestCell, loc))
####################### Draw dose tests ############################
if DoseTestBool:
DT_locs = np.array([(11.5*mm, 7.5*mm)])
d1reps = 7
d2reps = 6
DTx = 1.2*mm
DTy = 0.7*mm
DTCell = gdspy.Cell('DTCell')
for w in w_btwn_array:
for dose1 in np.arange(d1reps):
for dose2 in np.arange(d2reps):
                curloc = [DTx*(dose1-(float(d1reps)-1)/2), DTy*(dose2-(float(d2reps)-1)/2)]
snailArray = make_snail_array(Mold, oldl_tot, w_lead, alpha, n_j, l_j, loopSize, w_bridge, w, underCut, w_leadS,
center=curloc, rotation=(np.pi/2, curloc))
#RES_LAYER = 30+dose2, UC_LAYER = layers['dolan_undercut_layer'], BRIDGE_LAYER = 20+dose1
DTCell.add(snailArray)
DTCell.add(gdspy.Polygon(np.add(oldpadPts, curloc), layer = layers['coarse_layer']))
DTCell.add(gdspy.Polygon(np.add(oldpadPts, curloc), layer = layers['coarse_layer']).rotate(np.pi, center = curloc))
for loc in DT_locs:
topCell.add(gdspy.CellReference(DTCell, loc))
'''
for w in w_btwn_array:
for dose1 in np.arange(d1reps-1):
curloc = DTy*(dose1-(np.float(d1reps)-1)/2)
for plgs in capPlgs:
DTCell.add(gdspy.Polygon(plgs, layer = 20+dose1).translate(1400*um, curloc))
DTCell.add(gdspy.Rectangle((-300*um+xshift, -res_width/2+curloc), (xshift, res_width/2+curloc), layer = 20+dose1))
DTCell.add(gdspy.Rectangle((xshift2, -res_width/2+curloc), (300*um+xshift2, res_width/2+curloc), layer = 20+dose1))
# DTCell.add(gdspy.Rectangle((-300*um-xshift, -res_width/2+curloc), (-xshift, res_width/2+curloc), layer = 20+dose1))
# DTCell.add(gdspy.Rectangle((2*um-xshift, -res_width/2+curloc), (300*um-xshift, res_width/2+curloc), layer = 20+dose1))
# DTCell.add(gdspy.Rectangle((-300*um-xshift, -res_width/2+curloc), (-xshift, res_width/2+curloc), layer = 20+dose1))
# DTCell.add(gdspy.Rectangle((2*um-xshift, -res_width/2+curloc), (300*um-xshift, res_width/2+curloc), layer = 20+dose1))
### generating one test structure for trying out different beamer flow
curloc = DTy*(d1reps/2)
for plgs in capPlgs:
DTCell.add(gdspy.Polygon(plgs, layer = 12).translate(1400*um, curloc))
DTCell.add(gdspy.Rectangle((-300*um+xshift, -res_width/2+curloc), (xshift, res_width/2+curloc), layer = 12))
DTCell.add(gdspy.Rectangle((xshift2, -res_width/2+curloc), (300*um+xshift2, res_width/2+curloc), layer = 12))
for loc in DT_locs:
topCell.add(gdspy.CellReference(DTCell, loc))
'''
########################## write the gds ##############################
gdspy.write_gds(design_name + '.gds', unit=1.0e-6, precision=1.0e-9)
#gdspy.gds_print(design_name + '.gds', unit=1.0e-6, precision=1.0e-9) #old gdspy version
########################## Output doses ###################################
base_50nA = 500
base_5nA = 150
scale = 1
#
# device_layers_50nA = [layers['coarse_layer']
# ]
# device_doses_50nA = np.array([560.0/base_50nA])*scale
# print 'device doses 50 nA: ' + np.array_str(base_50nA*device_doses_50nA, precision = 1)+'\n'
device_layers_50nA = []
device_doses_50nA = []
print('device doses 50 nA: '+np.array_str(np.array([750.0]), precision = 1)+'\n')
device_layers_5nA = [layers['dolan_fine_layer'],
layers['dolan_undercut_layer'],
layers['dolan_bridge_layer'],
]
device_doses_5nA = np.array([7.4,
1.48,
1.5,
])*scale
print('device doses 5 nA: ' + np.array_str(base_5nA*device_doses_5nA, precision = 1)+'\n')
layers_5nA = device_layers_5nA
doses_5nA = device_doses_5nA
layers_50nA = device_layers_50nA
doses_50nA = device_doses_50nA
if DoseTestBool:
#DT_coarse_layers = np.linspace(20, 20+d1reps-2, d1reps-1)
#DT_coarse_doses = np.linspace(1.0, 1.27, d1reps-1)*scale
DT_fine_layers = 30+np.arange(d2reps)
DT_bridge_layers = 20+np.arange(d1reps)
DT_bridge_doses = 1.0/base_5nA*np.linspace(150, 300, d1reps)*scale
DT_fine_doses = 1.0/base_5nA*np.linspace(900, 1300, d2reps)*scale
print('DT_bridge_doses: ' + np.array_str(base_5nA*DT_bridge_doses, precision = 1)+'\n')
print('DT_fine_doses: ' + np.array_str(base_5nA*DT_fine_doses, precision = 1)+'\n')
layers_5nA = np.concatenate((layers_5nA, DT_bridge_layers,DT_fine_layers))
doses_5nA = np.concatenate((doses_5nA, DT_bridge_doses, DT_fine_doses))
#print 'DT_coarse_doses: ' + np.array_str(base_50nA*DT_coarse_doses, precision = 1)+'\n'
# print 'hi'
#layers_50nA = np.concatenate((device_layers_50nA, DT_coarse_layers))
#layers_50nA = np.concatenate((device_layers_50nA, DT_coarse_layers))
#doses_50nA = np.concatenate((device_doses_50nA, DT_coarse_doses))
#np.savetxt('doses50nA.txt', zip(layers_50nA, doses_50nA), fmt='%u(0), %.4f')
    np.savetxt('doses5nA.txt', list(zip(layers_5nA, doses_5nA)), fmt='%u(0), %.4f')
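    # Editor's note (not part of the original file): with fmt='%u(0), %.4f'
    # each (layer, dose) pair becomes one "layer(0), dose" row in doses5nA.txt,
    # e.g. "1(0), 7.4000" for the dolan_fine_layer.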
| ["noreply@github.com"] | katrinasliwa.noreply@github.com |
| 99e346ccaede7d498721e68350dc53e6e9a31dd1 | b94064c4aad38ddf1a94d28733bccfd61aa3068c | /PRA3/misc/gen_stress.py | 515b43859541188401c01cc04c04fe76a96c485e | [] | no_license | Giantpizzahead/pro-club-problems | 45b3738ec6682bf77815f0c1535a345fed031d9f | e2a2d618f3d781f32a9876eb9685f143f0b4706c | refs/heads/master | 2023-07-01T19:24:42.768521 | 2021-08-09T12:06:50 | 2021-08-09T12:06:50 | 394,275,100 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py |
import random
chars = '()[]{}'
chances = [1, 1, 1, 1, 1, 1]
choices = []
for i in range(6):
for j in range(chances[i]):
choices.append(chars[i])
def gen_paren(i):
return random.choice(choices)
for j in range(10):
with open("PRA3/subtasks/03_stress/{:03}.in".format(j), 'w') as fout:
N = random.randint(1, 25)
for i in range(N):
fout.write(gen_paren(i))
fout.write("\n")
| ["43867185+Giantpizzahead@users.noreply.github.com"] | 43867185+Giantpizzahead@users.noreply.github.com |
| cbae5ae4d12b34ea72f0da79a539ed72f0591d10 | eba805f613787956a43f8831fd9626fe4abe238a | /Billetera_de_criptomonedas(Python)/billetera_Digital.py | a17938243086404891750e9c9bfd673f47bb78ad | [] | no_license | simon320/Python_Billetera | c9451a795db0d3d621a4bd27f2739be44c108c05 | e26588c189a5707299a44cafe5cf8c87dead0429 | refs/heads/main | 2023-06-30T10:24:48.051339 | 2021-08-03T01:56:35 | 2021-08-03T01:56:35 | 392,134,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,531 | py |
import pickle
import requests
import sys
import time
from requests.models import CaseInsensitiveDict
# SERIALIZATION HELPERS FOR THE WALLET AND THE TRANSACTION HISTORY
def escribe_archivo(): # Overwrites the wallet file
    global cripto_dic
    archivo_serial = open("serial-billetera", "wb")
    pickle.dump(cripto_dic, archivo_serial)
    archivo_serial.close()
    del archivo_serial
def recupera_archivo(): # Reads the wallet file
    global cripto_dic
    archivo_serial2 = open("serial-billetera", "rb")
    cripto_dic = pickle.load(archivo_serial2)
def escribe_archivo2(): # Overwrites the history file
    global lista_historial
    archivo_serial = open("serial-historial", "wb")
    pickle.dump(lista_historial, archivo_serial)
    archivo_serial.close()
    del archivo_serial
def recupera_archivo2(): # Reads the history file
    global lista_historial
    archivo_serial2 = open("serial-historial", "rb")
    lista_historial = pickle.load(archivo_serial2)
# VARIABLES
cripto_dic = {} # Dictionary holding the cryptocurrencies in the wallet
lista_historial = [] # List of recorded transactions
movimiento = "" # Global variable describing the current movement
codigo_dic = {'enviador' : 'eXYZ1243', 'destinatario' : 'dWER4532'} # Codes for transactions
cod = ""
opciones_menu = (
"Recibir cantidad",
"Transferir monto",
"Mostrar balance de una moneda",
"Mostrar balance general",
"Mostrar histórico de transacciones",
"Salir del programa"
)
# CREATING THE COIN PRICE QUOTER
_ENDPOINT = "https://api.binance.com"
def _url(api):
return _ENDPOINT + api
def get_price(criptomoneda):
return requests.get(_url("/api/v3/ticker/price?symbol=" + criptomoneda))
# CREATING THE LIST OF VALID COINS ACCORDING TO "coinmarket"
monedas_lista=[]
COINMARKET_API_KEY = "5c9d25a1-b890-4060-9ef3-5f84a3788192"
headers = {'Accepts': 'application/json','X-CMC_PRO_API_KEY': COINMARKET_API_KEY}
data = requests.get("https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest",headers=headers).json()
for cripto in data["data"]:
monedas_lista.append(cripto["symbol"])
# SETTING UP THE OPTIONS MENU
def menu_inicial():
while True:
print("""
¿Que deseas hacer?
1. Recibir cantidad.
2. Transferir monto.
3. Mostrar balance de una moneda.
4. Mostrar balance general.
5. Mostrar histórico de transacciones.
6. Salir del programa.""")
        # VALIDATE THE CHOSEN OPTION (whole numbers only; the original
        # .replace('.','',1).isdigit() check let "3.5" through, crashing int())
        opcion = input("\nElige una opcion: ")
        while not opcion.isdigit():
            print("Opcion incorrecta.")
            opcion = input("Por favor, ingrese un numero de opcion disponible: ")
        opcion = int(opcion)
if opcion == 1:
print("\n" + opciones_menu[0])
recibir()
elif opcion == 2:
print("\n" + opciones_menu[1])
transferencia()
elif opcion == 3:
print("\n" + opciones_menu[2])
cotizacion()
elif opcion == 4:
print("\n" + opciones_menu[3])
cotizacion_general()
elif opcion == 5:
print("\n" + opciones_menu[4])
historial()
elif opcion == 6:
escribe_archivo()
escribe_archivo2()
print("\nGuardando movimientos...")
time.sleep(1)
print("\nFin del programa.\n")
sys.exit()
# COIN INPUT AND VALIDATION
def es_moneda(cripto): # Checks that the coin exists on "coinmarket"
    return cripto in monedas_lista
def ingresando_moneda(): # Prompts for and validates a coin
global moneda
moneda = input("Ingrese el nombre de la moneda: ")
while not es_moneda(moneda):
print("Moneda Invalida.")
moneda=input("Por favor, ingrese un nombre correcto de moneda: ")
else:
print("\nEligio " + moneda + ".")
# NUMERIC VALIDATION
def es_numero(): # Validates that the amount is a real number
global cantidad
cantidad = input("Ingrese el monto: ")
while not cantidad.replace('.','',1).isdigit():
print("Monto incorrecto.")
cantidad = input("Por favor, indique una cantidad real: ")
cantidad = float(cantidad)
# VALIDATING THE SENDER AND RECIPIENT CODES
def es_codigo_destinatario(): # RECIPIENT
codigo = input("Ingrese el codigo del destinatario: ")
while not codigo == codigo_dic['destinatario']:
print("Codigo incorrecto.")
codigo = input("Por favor, ingrese el codigo correcto del destinatario: ")
def es_codigo_enviador(): # SENDER
codigo = input("Ingrese el codigo del enviador: ")
while not codigo == codigo_dic['enviador']:
print("Codigo incorrecto.")
codigo = input("Por favor, ingrese el codigo correcto del enviador: ")
# UPDATE A COIN'S AMOUNT IF IT IS ALREADY IN THE WALLET, OTHERWISE ADD IT
def incrementa_moneda(): # Creates or increases the amount
if moneda in cripto_dic:
cantidad_actual = cripto_dic.get(moneda)
cripto_dic[moneda] = cantidad_actual + cantidad
else:
cripto_dic[moneda] = cantidad
print(cripto_dic)
def reduce_moneda(): # Decreases the amount
if moneda in cripto_dic:
cripto_dic[moneda] -= cantidad
else:
cripto_dic[moneda] = cantidad
print(cripto_dic)
# Setting up the date/movement log
def fecha_movimiento():
global lista_historial
global movimiento
global cod
tiempo_seg = time.time()
fecha = time.ctime(tiempo_seg)
data = get_price(moneda + "USDT").json()
dolares = cripto_dic.get(moneda) * float(data.get("price"))
data_transaccion = f""" Se {movimiento} la cuenta con codigo:{codigo_dic.get(cod)},
{cantidad} {moneda} que equivale a Us${dolares}.
Fecha del movimiento {fecha}."""
    lista_historial.append(data_transaccion) # Append the newly recorded movement to the list
    print(data_transaccion) # Show the changes
    escribe_archivo2() # Persist the changes to the file (serial-historial)
# DEFINING THE PROGRAM'S OPTION FUNCTIONS
# Option 1: receive cryptocurrency
def recibir(): # Receives an amount of some coin
    ingresando_moneda() # Prompt for and validate the coin
    es_numero() # Validate that the amount is a real number
    es_codigo_enviador() # Validate the code
global movimiento
global cod
movimiento = "recibieron desde"
cod = 'enviador'
    deposito = input(f"Se van a depositar {cantidad} {moneda} en su cuenta.\n¿Desea continuar? (s/n): ")
    if deposito == "s": # Confirm the movement
        incrementa_moneda() # Update the amount
        escribe_archivo() # Persist the changes to the file (serial-billetera)
print("\nGuardando movimientos...")
time.sleep(2)
print("Operacion exitosa.")
        fecha_movimiento() # Record the transaction date
time.sleep(2)
else:
print("Operacion cancelada.")
# Option 2: transfer cryptocurrency
def transferencia(): # Sends an amount of a coin held in the wallet
    ingresando_moneda() # Prompt for and validate the coin
    if moneda in cripto_dic: # Check that the coin to transfer is actually held
        es_numero() # Validate that the amount is a real number
        cantidad_actual = cripto_dic.get(moneda)
        if cantidad_actual >= cantidad: # Check that the amount is available
            es_codigo_destinatario() # Validate the code
global movimiento
global cod
movimiento = "transfirieron a"
cod = 'destinatario'
            deposito = input(f"Se van a depositar {cantidad} {moneda} en la cuenta de destinatario escogido.\n¿Desea continuar? (s/n): ")
            if deposito == "s": # Confirm the movement
                reduce_moneda() # Update the amount
                if cripto_dic[moneda] == 0.0: # If the coin drops to 0, remove it from the dictionary
                    cripto_dic.pop(moneda)
                escribe_archivo() # Persist the changes to the file (serial-billetera)
print("\nGuardando movimientos...")
time.sleep(2)
print("Operacion exitosa.")
                fecha_movimiento() # Record the transaction date
time.sleep(2)
else:
print("Operacion cancelada.")
else:
print(f"Uds solo posee {cantidad_actual} de {moneda}.")
time.sleep(2)
else:
print(f"Uds no posee {moneda}")
time.sleep(2)
# Option 3: show the price of one coin
def cotizacion():
    ingresando_moneda() # Prompt for and validate the coin
    data = get_price(moneda + "USDT").json()
    print(f"El precio de {moneda} es Us$",data["price"]) # Print the current price according to "Binance"
    if moneda in cripto_dic: # If the queried coin is in the wallet, print the amount and its dollar value
print("Usted cuenta con ",cripto_dic.get(moneda), " " ,moneda)
dolares = cripto_dic.get(moneda) * float(data.get("price"))
print(f"En dolares al cambio actual cuenta con Us${dolares}")
else:
print(f"Usted no posee {moneda}")
time.sleep(2)
# Option 4: show the overall balance
def cotizacion_general():
    total_dolares = 0 # Accumulates the total dollar value at the current exchange rate
    moneda_en_billetera = cripto_dic.keys() # The coins currently held
    for cripto in moneda_en_billetera: # Iterate over each coin and print coin, amount, and price
print(" En",cripto,"posee",cripto_dic.get(cripto),"unidades.")
data = get_price(cripto + "USDT").json()
print(f"Cotizacion actual {cripto} Us$",data["price"])
dolares = cripto_dic.get(cripto) * float(data.get("price"))
print(f"En dolares al cambio actual cuenta con Us${dolares}\n")
total_dolares += dolares
print("Usted cuenta con un total de Us$",total_dolares, "en criptomonedas.")
time.sleep(2)
# Option 5: transaction history
def historial():
    for transaccion in lista_historial: # Iterate over the transaction list and print each entry
        print(transaccion, "\n")
# PROGRAM START
recupera_archivo() # Load the wallet
recupera_archivo2() # Load the transaction history
print("\nBienvenido a su billetera digital Desktop\n") # Program title
menu_inicial()
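
# --- Editor's sketch (not part of the original file) ---
# get_price() wraps Binance's public ticker endpoint, e.g.:
#   get_price("BTCUSDT").json()
# returns a dict like {"symbol": "BTCUSDT", "price": "43250.10000000"}
# (price value illustrative).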
| ["simonjuarezk2@gmail.com"] | simonjuarezk2@gmail.com |
| 0f7db52aa57af3cd171f9d318421a2c4a9c16bca | 7eca4650fffb671586e5f738d7b410e22a1f7c10 | /backend/ghtproject/ghtelectroniccenter/serializers.py | 142a359d6112512a429008c5ce9bde26764b62da | [] | no_license | waffi/College-Hokabema | d811a1b31d8d21c6f3717ade31aefc372c8f5755 | c67abfb33367225c7eea8332d6c851d23045bb29 | refs/heads/master | 2023-02-07T05:24:02.114649 | 2020-12-26T01:45:26 | 2020-12-26T01:45:26 | 79,775,190 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py |
from rest_framework import serializers
from ghtelectroniccenter.models import TblMenu, TblMenuDetail, Berita, Menu, Menuhaskandungan, Kandungan, Pelanggan, Cart, Kategori, Pesanan
class MenuDetailSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = TblMenuDetail
fields = '__all__'
depth = 1
class MenuSerializer(serializers.ModelSerializer):
#Menu_Detail = MenuDetailSerializer(many = True)
#Menu_Detail = serializers.PrimaryKeyRelatedField(many=True,read_only=True)
Menu_Detail = serializers.SlugRelatedField(many=True,read_only=True,slug_field='kandungan')
class Meta:
model = TblMenu
depth = 1
fields = '__all__'
# new
class BeritaSerializer(serializers.ModelSerializer):
class Meta:
model = Berita
fields = ('deskripsi','gambar')
#Menu
class GetKandungan(serializers.ModelSerializer):
class Meta:
model = Kandungan
fields = '__all__'
class GetMenuhasKandunganSerializer(serializers.ModelSerializer):
    # Connects to Kandungan
kandungan = GetKandungan(source='id_kandungan')
class Meta:
model = Menuhaskandungan
fields = '__all__'
class GetMenuSerializer(serializers.ModelSerializer):
KandunganSerializer = GetMenuhasKandunganSerializer(many = True)
class Meta:
model = Menu
depth = 1
fields = '__all__'
#Login
class LoginSerializer(serializers.ModelSerializer):
    class Meta:
        model = Pelanggan
        fields = ('user_name',)
#PostOrder Serializer
class PostOrderSerializer(serializers.ModelSerializer):
class Meta:
model = Cart
fields = '__all__'
#Kategori
class KategoriSerializer(serializers.ModelSerializer):
class Meta:
model = Kategori
fields = '__all__'
#Pesanan
class PesananSerializer(serializers.ModelSerializer):
class Meta:
model = Pesanan
fields = '__all__'
# Trying out Pelanggan
class PelangganSerializer(serializers.ModelSerializer):
class Meta:
model = Pelanggan
fields = ('user_name','password',)
class CariMenuSerializer(serializers.ModelSerializer):
class Meta:
model = Menu
fields = ('nama_menu',)
#Order Cart
class OrderCartSerializer(serializers.ModelSerializer):
class Meta:
model = Cart
fields = '__all__'
class CartSerializer(serializers.ModelSerializer):
class Meta:
model = Cart
fields = '__all__'
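
# --- Editor's usage sketch (not part of the original file) ---
# The DRF serializers above are used like:
#   serializer = KategoriSerializer(Kategori.objects.first())
#   serializer.data              # -> dict with every Kategori field ('__all__')
# and for deserialization:
#   s = PostOrderSerializer(data=payload)
#   if s.is_valid():
#       s.save()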
| ["waffifaturrahman@yahoo.co.id"] | waffifaturrahman@yahoo.co.id |
| 31387fde72e4cddfddb337ca19b248b3b8dbdddb | 9e235d1c1c62981a4d3f0f09656715b408f33c5a | /bsl/migrations/0002_auto_20190412_1505.py | 258807ce9c3d12a3a8ed11384cc3936165a79b35 | [] | no_license | shivamg7/learnwell | 1d33c678573b047fe42102eb3f5dfb796524aa39 | 06348e9c3ce3b11d119de869e9c7888b73067c9a | refs/heads/master | 2020-05-09T09:49:48.305587 | 2019-04-13T05:23:46 | 2019-04-13T05:23:46 | 181,018,034 | 0 | 0 | null | 2019-04-12T18:36:40 | 2019-04-12T13:53:24 | HTML | UTF-8 | Python | false | false | 687 | py |
# Generated by Django 2.2 on 2019-04-12 15:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bsl', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='question',
name='optionA',
field=models.CharField(max_length=300),
),
migrations.AlterField(
model_name='question',
name='optionB',
field=models.CharField(max_length=300),
),
migrations.AlterField(
model_name='question',
name='optionC',
field=models.CharField(max_length=300),
),
]
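
# Editor's note (not part of the original file): this migration is applied
# with Django's standard command, e.g. `python manage.py migrate bsl`.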
| ["playfullittlekid@gmail.com"] | playfullittlekid@gmail.com |
| bd0fb18feff8a8c66e18114c00d697f885e4515c | 8175dcd9793706f3b387aecd8464a4cecea72664 | /samples/python-requests/request_to_cloudevent.py | 11d3cc72ef8af7fc46cb6b961cdb233e67a0da90 | ["Apache-2.0"] | permissive | di/sdk-python | 5c67180f00f9a76b2b85ac933d496c76a90afce4 | ec0ae88f3b6b7844c22b21f9a08995d749d8199e | refs/heads/master | 2022-07-27T18:52:51.116029 | 2020-03-20T14:19:46 | 2020-03-20T14:19:46 | 252,578,534 | 1 | 0 | Apache-2.0 | 2020-04-02T22:29:40 | 2020-04-02T22:29:40 | null | UTF-8 | Python | false | false | 1,245 | py |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import io
import requests
import sys
from cloudevents.sdk import marshaller
from cloudevents.sdk.event import v02
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit("Usage: python with_requests.py "
"<CloudEvent source URL>")
url = sys.argv[1]
response = requests.get(url)
response.raise_for_status()
headers = response.headers
data = io.BytesIO(response.content)
event = v02.Event()
http_marshaller = marshaller.NewDefaultHTTPMarshaller()
event = http_marshaller.FromRequest(
event, headers, data, json.load)
print(json.dumps(event.Properties()))
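
# --- Editor's sketch (not part of the original file) ---
# Example invocation; the URL is a placeholder that must return a CloudEvent
# over HTTP:
#   python request_to_cloudevent.py http://localhost:8080/event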
| ["denys.makogon@oracle.com"] | denys.makogon@oracle.com |
| 74184826e58940d910271507ed40c9bdde47fc52 | fab3d7ffdedf3e9f9c5b7fdf890bea9be956535f | /squealy/test/test_jwt_authentication.py | 18ea47ae0d9bb7807b45dbb8d4c71526bf02cbf2 | ["MIT"] | permissive | zeeshankhan28/squealy | 1f462cb8df8065c2a02491425856620988cf4215 | 5dfe9c5830ef74978f5defc872500fb710097408 | refs/heads/phase-2 | 2020-03-22T11:18:37.832416 | 2018-08-16T07:40:20 | 2018-08-16T07:40:20 | 139,962,477 | 0 | 0 | MIT | 2018-07-17T10:38:27 | 2018-07-06T09:18:41 | JavaScript | UTF-8 | Python | false | false | 983 | py |
import jwt
from .test_base_file import BaseTestCase
from squealy.models import Chart
from django.test import Client
from test.test_support import EnvironmentVarGuard
class JWTAuthenticationTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.create_schema(self)
self.client = Client()
self.env = EnvironmentVarGuard()
self.env.set('JWT_KEY', 'secret')
def test_redirection_to_login_for_unauthenticated_requests(self):
with self.env:
response = self.client.get('/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.startswith('/login'))
def test_login_with_jwt(self):
with self.env:
token = jwt.encode({'username': 'foo'}, 'secret', algorithm='HS256')
response = self.client.get('/' + '/?accessToken=' + token)
self.assertEqual(response.status_code, 200)
def tearDown(self):
BaseTestCase.delete_schema(self)
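
# Editor's note (not part of the original file): with PyJWT >= 2.0,
# jwt.encode() returns a str; on 1.x it returned bytes, and the URL
# concatenation in test_login_with_jwt would need token.decode() there.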
| ["devashish.sharma@hashedin.com"] | devashish.sharma@hashedin.com |
| 3697b31ab9d91ad545499d4be31d62c7a3109755 | daf38b94ce43ace82259c48bd2a45471dcf69245 | /Test3.py | 0758d64f6362d6d0bb371ec9acef27a4c114a285 | [] | no_license | app-stack/l3codingpractice | af2b336ac7d98b4d8bb11de17727fb258436513b | 88ba3ca5b4f69c699aa3c1aa2b2f412e123f57e8 | refs/heads/main | 2023-07-01T01:19:08.763607 | 2021-08-06T17:20:31 | 2021-08-06T17:20:31 | 371,988,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py |
def mass2height(a, b, name):
    # dict1={'mass':48, 'height':169, 'name':'derrick'};
    # a=dict1['mass'];
    # b=dict1['height'];
    # mass2height= a/b;
    # print("The mass to height ratio is {0}".format(mass2height))
    dict1 = {
        'mass': a,
        'height': b,
        'name': name
    }
    a = dict1['mass']
    b = dict1['height']
    Rmass2height = a / b
    print("The mass to height ratio is {0}".format(Rmass2height))
| ["noreply@github.com"] | app-stack.noreply@github.com |
| e58712c50ae00c1d0553ddac7da4b525d46de420 | 2faa702765aea4f460928a0931ddaf96a48e70fd | /calclock.py | 7fff558fde40ef27ec1f8ada64d3fd4021035c50 | [] | no_license | Quitten/calclock | d2de16308d2ca911c6776665c2b34e7d703f37bd | 04fcca81fa20ba600ed14a72878ab9f2945ffe49 | refs/heads/master | 2020-08-29T17:25:54.307739 | 2019-10-28T18:11:56 | 2019-10-28T18:11:56 | 218,110,786 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,072 | py |
from __future__ import print_function
import datetime
import dateutil.parser
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
def is_ascii(s):
return all(ord(c) < 128 for c in s)
def isValidAlarm(event, minutesBeforeEvent, workingHours):
valid = True
if not is_ascii(event['summary']):
valid = False
start = event['start'].get('dateTime', event['start'].get('date'))
start = dateutil.parser.parse(start) - datetime.timedelta(minutes=minutesBeforeEvent)
if start.hour < workingHours['start'] or start.hour > workingHours['end']:
valid = False
return valid
def getService(scopes, tokenFileName, credFileName):
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(tokenFileName):
with open(tokenFileName, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
credFileName, scopes)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(tokenFileName, 'wb') as token:
pickle.dump(creds, token)
return build('calendar', 'v3', credentials=creds)
def getEvents(service, maxResults=10):
now = datetime.datetime.now() - datetime.timedelta(hours=2) # 'Z' indicates UTC time
events_result = service.events().list(calendarId='primary', timeMin=now.isoformat()+ 'Z',
maxResults=maxResults, singleEvents=True,
orderBy='startTime').execute()
return events_result.get('items', [])
def create_event(service, start_time_str, summary, duration=1,attendees=None, description=None, location=None):
event = {
'summary': summary,
'location': location,
'description': description,
'start': {
'dateTime': start_time_str[:-7],
'timeZone': 'Asia/Jerusalem',
},
'end': {
'dateTime': start_time_str[:-7],
'timeZone': 'Asia/Jerusalem',
}
}
return service.events().insert(calendarId='primary', body=event,sendNotifications=False).execute()
def getAlarmEvents(events):
existingAlarms = []
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
if 'Alarm' in event['summary']:
existingAlarms.append({'start': start, 'title': event['summary']})
return existingAlarms
def writeAlarmsEvents(calCfg, alarms):
alarmsAdded = 0
service = getService(calCfg['scopes'], calCfg['tokenFile'], calCfg['credFile'])
existingEvents = getEvents(service)
existingAlarms = getAlarmEvents(existingEvents)
for alarm in alarms:
alarmFound = False
for existedAlarm in existingAlarms:
if existedAlarm['start'] == alarm['start']:
alarmFound = True
if not alarmFound:
print('Alarm set successfully')
create_event(service, alarm['start'], 'Alarm')
alarmsAdded = alarmsAdded+1
return alarmsAdded
# TODO: make alarms a set
def extractAlarmsFrom(events, minutesBeforeEvent, workingHours):
alarms = []
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
start = dateutil.parser.parse(start) - datetime.timedelta(minutes=minutesBeforeEvent)
if isValidAlarm(event, minutesBeforeEvent, workingHours):
alarms.append({'start': start.isoformat(), 'title': 'Alarm'})
return alarms
def generateAlarms(calCfg, workingHours, minutesBeforeEvent=10):
service = getService(calCfg['scopes'], calCfg['tokenFile'], calCfg['credFile'])
events = getEvents(service)
alarms = extractAlarmsFrom(events, minutesBeforeEvent, workingHours)
return alarms
def main():
    # calendar 1 and 2 can be the same calendar, but I wanted isolation so I used 2.
calendar1 = { 'scopes': ['https://www.googleapis.com/auth/calendar.readonly']
, 'tokenFile': 'token_cal1.pickle'
, 'credFile': 'credentials_cal1.json'
}
calendar2 = { 'scopes': ['https://www.googleapis.com/auth/calendar.events']
, 'tokenFile': 'token_cal2.pickle'
, 'credFile': 'credentials_cal2.json'
}
workingHours = {'start': 11, 'end': 20}
alarms = generateAlarms(calendar1, workingHours)
alarmsAdded = writeAlarmsEvents(calendar2, alarms)
print('{} alarms added'.format(alarmsAdded))
if __name__ == '__main__':
main()
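
# Editor's note (not part of the original file): running this script requires
# Google OAuth client secrets saved as credentials_cal1.json and
# credentials_cal2.json next to it; token_cal1.pickle / token_cal2.pickle are
# created automatically on the first authorization run.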
| ["barakt@wix.com"] | barakt@wix.com |
| e0e27e72e01ef529f900d2aa0a660860810d5a8f | c4470e0c9aa84d21afed49fe31f3a19f9d4bfe9e | /core/migrations/0022_auto_20200815_1019.py | c1ea4d9e10f55378ff2066899997b91e8a95b54b | [] | no_license | TRavi107/e_commerce_site | 67f267a3041c7a2b3b072270d2888faf6c852ccc | 63947deefeabcc5d14432f1a7813ae8375a6a7de | refs/heads/master | 2022-12-05T06:23:33.484797 | 2020-08-26T10:18:20 | 2020-08-26T10:18:20 | 290,464,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py |
# Generated by Django 3.0.8 on 2020-08-15 10:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0021_auto_20200815_1011'),
]
operations = [
migrations.AlterField(
model_name='items',
name='img',
field=models.ImageField(upload_to='items_img/'),
),
]
| ["thapa.ravi7.rt@gmail.com"] | thapa.ravi7.rt@gmail.com |
| ee0a102d97f9dba135d56144294f82b1d94a70db | 22d46da879c9b3da9e4c1860c814c220d3935028 | /src/python/org/cassandra/geo_maps/geo_bounds.py | 7e206fa10f79de418b3b12a56287286285f6727a | ["LicenseRef-scancode-unknown-license-reference", "MIT"] | permissive | cassandra/geo_maps | d9bc9d19a9d891615aca94edd01881eba0900aec | 0257bd73456f9312070e3f7627effee30b73fdea | refs/heads/main | 2023-03-05T18:48:56.401709 | 2021-02-06T00:46:54 | 2021-02-06T00:46:54 | 336,423,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,435 | py |
from dataclasses import dataclass
from . import utils
@dataclass
class GeoBounds:
"""
Holds the 4 corner points of a geographic bounding "box" (its really a spherical cap).
"""
longitude_min : float = 999999.0
longitude_max : float = -999999.0
latitude_min : float = 999999.0
latitude_max : float = -999999.0
def __repr__(self):
        return ( f'long = ( {self.longitude_min}, {self.longitude_max} ),'
                 f' lat = ( {self.latitude_min}, {self.latitude_max} )' )
def __str__(self):
return self.__repr__()
def __bool__(self):
return ( self.longitude_min <= 180.0 ) and ( self.latitude_min <= 90.0 )
def corner_points(self):
return [ ( self.longitude_min, self.latitude_min ),
( self.longitude_max, self.latitude_min ),
( self.longitude_max, self.latitude_max ),
( self.longitude_min, self.latitude_max ) ]
@property
def longitude_span(self):
return abs(self.longitude_max - self.longitude_min)
@property
def latitude_span(self):
return abs(self.latitude_max - self.latitude_min)
@property
def longitude_span_miles(self):
reference_latitude = ( self.latitude_max + self.latitude_min ) / 2.0
return self.longitude_span * utils.get_miles_per_longitude(
reference_latitude = reference_latitude )
@property
def latitude_span_miles(self):
return self.latitude_span * utils.get_miles_per_latitude()
def add_point( self, longitude : float, latitude : float ):
self.add_longitude( longitude )
self.add_latitude( latitude )
return
def add_bounds( self, other_geo_bounds : 'GeoBounds' ):
self.longitude_min = min( other_geo_bounds.longitude_min, self.longitude_min )
self.longitude_max = max( other_geo_bounds.longitude_max, self.longitude_max )
self.latitude_min = min( other_geo_bounds.latitude_min, self.latitude_min )
self.latitude_max = max( other_geo_bounds.latitude_max, self.latitude_max )
return
def add_longitude( self, longitude : float ):
self.longitude_min = min( longitude, self.longitude_min )
self.longitude_max = max( longitude, self.longitude_max )
return
def add_latitude( self, latitude : float ):
self.latitude_min = min( latitude, self.latitude_min )
self.latitude_max = max( latitude, self.latitude_max )
return
def contains_point( self, longitude_deg : float, latitude_deg : float ):
return ( ( longitude_deg >= self.longitude_min )
and ( longitude_deg <= self.longitude_max )
and ( latitude_deg >= self.latitude_min )
and ( latitude_deg <= self.latitude_max ) )
def contains_bounds( self, other_geo_bounds : 'GeoBounds' ):
for longitude, latitude in other_geo_bounds.corner_points():
if not self.contains_point( longitude_deg = longitude, latitude_deg = latitude ):
return False
return True
def intersect( self, other_geo_bounds : 'GeoBounds' ):
ll_x = max( self.longitude_min, other_geo_bounds.longitude_min )
ll_y = max( self.latitude_min, other_geo_bounds.latitude_min )
ur_x = min( self.longitude_max, other_geo_bounds.longitude_max )
ur_y = min( self.latitude_max, other_geo_bounds.latitude_max )
if ( ll_x > ur_x ) or ( ll_y > ur_y ):
return None
return GeoBounds( longitude_min = ll_x,
longitude_max = ur_x,
latitude_min = ll_y,
latitude_max = ur_y )
def intersects( self, other_geo_bounds : 'GeoBounds' ):
return self.intersect( other_geo_bounds ) is not None
def set_latitude_range_min( self, desired_miles : float ):
# N.B. Latitude distance not affected by longitude
current_miles = utils.get_distance( self.latitude_min, 0.0, self.latitude_max, 0.0 )
if current_miles >= desired_miles:
return
expand_miles = ( desired_miles - current_miles ) / 2.0
expand_latitude_deg = utils.get_latitude_span( distance_miles = expand_miles )
self.latitude_min -= expand_latitude_deg
self.latitude_max += expand_latitude_deg
return
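# A minimal usage sketch (not part of the original module; values are
# illustrative). Run with `python -m ... .geo_bounds` so the relative import
# above resolves.
if __name__ == '__main__':
    box = GeoBounds()
    box.add_point( longitude = -122.5, latitude = 37.7 )
    box.add_point( longitude = -122.3, latitude = 37.8 )
    other = GeoBounds( longitude_min = -122.4, longitude_max = -122.0,
                       latitude_min = 37.75, latitude_max = 38.0 )
    print( box.intersect( other ) )   # overlap: long ( -122.4, -122.3 ), lat ( 37.75, 37.8 )
    print( box.contains_point( longitude_deg = -122.4, latitude_deg = 37.75 ) )  # True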
|
[
"arc@cassandra.org"
] |
arc@cassandra.org
|
99212124838289ecc10f721c098976e9924049bb
|
58a9bc04baf10ee33c580c81b4ab4d61e2503fcd
|
/controllers/kinematics.py
|
41a195de10e527086d01027878597a40b68d07f1
|
[] |
no_license
|
chrismailer/hexapod-sim
|
8fe130669104f5f29c154a0f5d39f10d8d6693be
|
666d788248312b2ae858d018449394a926d3cc79
|
refs/heads/master
| 2023-07-08T15:29:31.954416
| 2021-08-12T06:33:28
| 2021-08-12T06:33:28
| 353,006,682
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
import numpy as np
l_1 = 0.05317 # coxa link
l_2 = 0.10188 # femur link
l_3 = 0.14735 # tibia link
# transforms foot positions and speeds into joint angles and speeds in the leg coordinate frame
def inverse(foot_position, foot_speed):
x, y, z = foot_position
dx, dy, dz = foot_speed
theta_1 = np.arctan2(y, x)
c_1, s_1 = np.cos(theta_1), np.sin(theta_1)
c_3 = ((x - l_1 * c_1)**2 + (y - l_1 * s_1)**2 + z**2 - l_2**2 - l_3**2) / (2 * l_2 * l_3)
s_3 = -np.sqrt(np.maximum(1 - c_3**2, 0)) # maximum ensures not negative
theta_2 = np.arctan2(z, (np.sqrt((x - l_1 * c_1)**2 + (y - l_1 * s_1)**2))) - np.arctan2((l_3 * s_3), (l_2 + l_3 * c_3))
theta_3 = np.arctan2(s_3, c_3)
c_2, s_2 = np.cos(theta_2), np.sin(theta_2)
c_23 = np.cos(theta_2 + theta_3)
with np.errstate(all='ignore'):
theta_dot_1 = (dy*c_1 - dx*s_1) / (l_1 + l_3*c_23 + l_2*c_2)
theta_dot_2 = (1/l_2)*(dz*c_2 - dx*c_1*s_2 - dy*s_1*s_2 + (c_3 / s_3)*(dz*s_2 + dx*c_1*c_2 + dy*c_2*s_1))
theta_dot_3 = -(1/l_2)*(dz*c_2 - dx*c_1*s_2 - dy*s_1*s_2 + ((l_2 + l_3*c_3)/(l_3*s_3))*(dz*s_2 + dx*c_1*c_2 + dy*c_2*s_1))
theta_dot_1 = np.nan_to_num(theta_dot_1, nan=0.0, posinf=0.0, neginf=0.0)
theta_dot_2 = np.nan_to_num(theta_dot_2, nan=0.0, posinf=0.0, neginf=0.0)
theta_dot_3 = np.nan_to_num(theta_dot_3, nan=0.0, posinf=0.0, neginf=0.0)
joint_angles = np.array([theta_1, theta_2, theta_3])
joint_speeds = np.array([theta_dot_1, theta_dot_2, theta_dot_3])
return joint_angles, joint_speeds
# transforms leg joint angles into foot positions in leg coordinate frame
def forward(joint_angles):
    # the module-level link lengths l_1, l_2, l_3 defined above are used directly
    # (there is no class instance here, so the original `self.` access was a bug)
theta_1, theta_2, theta_3 = joint_angles
# Compute point from joint angles
x = np.cos(theta_1) * (l_1 + l_3 * np.cos(theta_2 + theta_3) + l_2 * np.cos(theta_2))
y = np.sin(theta_1) * (l_1 + l_3 * np.cos(theta_2 + theta_3) + l_2 * np.cos(theta_2))
z = l_3 * np.sin(theta_2 + theta_3) + l_2 * np.sin(theta_2)
return np.array([x, y, z])
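# A quick consistency sketch (not in the original file; the sample point is
# illustrative): feeding inverse()'s joint angles back through forward()
# should reproduce a reachable foot position.
if __name__ == '__main__':
    target = np.array([0.15, 0.05, -0.05])
    angles, _ = inverse(target, np.zeros(3))
    print(target, forward(angles))  # the two vectors should match closely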
|
[
"christophermailer@icloud.com"
] |
christophermailer@icloud.com
|
d8b47d5c56905a1cab77e5282aefb790c2dce98e
|
8a8a9440c894edbdebdcd151057f1f5846e96cd8
|
/opmop/missions/offers/haulage.py
|
b73ffc2023c44e06cd77b2ec28a521ef9afe7c60
|
[] |
no_license
|
ajbolous/finalproject
|
051c8d5d127c4ab57106baf45c647d8622188db8
|
ecbc44cdc31ec04e8259cf30b6d20e518f437e50
|
refs/heads/master
| 2020-12-24T12:20:57.607963
| 2017-08-14T18:52:51
| 2017-08-14T18:52:51
| 73,051,186
| 1
| 0
| null | 2020-07-15T21:49:08
| 2016-11-07T07:04:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
from opmop.models.task import HaulageTask
import opmop.missions.utils as utils
from opmop.main import Application
def makeOffer(machine, schedule):
bestDump = None
minDistance = 10000
for dumpLocation in schedule.mission.dumpLocations:
path, distance = Application.mapping.calcShortestPath(
dumpLocation.point, schedule.mission.digLocation.point)
        if (distance != -1 and distance < minDistance):
bestDump = dumpLocation
minDistance = distance
    if bestDump is None:
        return False, [], 0
consumedFuel = minDistance * machine.fuelConsumption
travelTime = minDistance / (machine.speed/4)
averageFillTime = 0.2
numberOfTravels = 1
    numberOfRefuels = (consumedFuel * numberOfTravels) / machine.fuelCapacity
    tripTime = 2*travelTime + averageFillTime + (numberOfRefuels * 1)
windows, numOfTasks = utils.getTimeWindows(
machine, schedule.date, tripTime)
if len(windows) <= 0:
        return False, [], 0
    # cost of one full trip (computed for reference; not used below)
    tripCost = tripTime + consumedFuel * 10 + minDistance * 10 + \
        averageFillTime * numberOfTravels * machine.staticFuelConsumption
totalTarget = 0
totalCosts = 0
minWindowCost = 9999999
bestWindow = None
for window in windows:
currentLocation = utils.getLocationAtTime(machine, window[0])
numOfHaulers = utils.getNumberOfHaulers(schedule, window)
wasteAtTime = utils.getWasteAtTime(schedule, window[0])
if(numOfHaulers < 1 and wasteAtTime > machine.weightCapacity):
path, distance = Application.mapping.calcShortestPath(
currentLocation, schedule.mission.digLocation.point)
windowCost = distance * machine.fuelConsumption + 20 * numOfHaulers
if windowCost < minWindowCost:
bestWindow = window
minWindowCost = windowCost
    if bestWindow is None:
        return False, [], 0
task = HaulageTask('{}-{}'.format(schedule.id, numberOfTravels), schedule.mission.digLocation,
bestDump, bestWindow[0], bestWindow[1], machine.weightCapacity, machine.id, "None")
    # use the cost of the chosen window, not whichever window happened to be last
    return True, [task], minWindowCost + totalCosts + numOfTasks * 50
|
[
"ajbolous@gmail.com"
] |
ajbolous@gmail.com
|
0f24d645c4f3c6130847eeecfd53b7e7a50a93aa
|
e7a804e5e68c4019262a5cb619ba80ef34614ae3
|
/pybind/nos/v7_0_1b/brocade_interface_ext_rpc/get_ip_interface/input/__init__.py
|
22eceb54ce3e83ebccd7467c0586d893c60d249f
|
[
"Apache-2.0"
] |
permissive
|
shivharis/pybind
|
787978726f7efa7e4662d32ebe0075f36f6ff2f4
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
refs/heads/master
| 2021-06-10T14:37:04.186120
| 2017-01-24T22:13:25
| 2017-01-24T22:13:25
| 70,860,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,550
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface-ext - based on the path /brocade_interface_ext_rpc/get-ip-interface/input. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__interface_type','__interface_name','__rbridge_id',)
_yang_name = 'input'
_rest_name = 'input'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__interface_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'port-channel': {'value': 5}, u'loopback': {'value': 7}, u'fortygigabitethernet': {'value': 4}, u'unknown': {'value': 1}, u'gigabitethernet': {'value': 2}, u'tengigabitethernet': {'value': 3}, u'tunnel': {'value': 10}, u'hundredgigabitethernet': {'value': 9}, u'fibrechannel': {'value': 8}, u'l2vlan': {'value': 6}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"The type of the interface. An 'unknown' type \nrepresents error scenario and should not be used."}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
self.__interface_name = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-3][0-9])/)?(([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='union', is_config=True)
self.__rbridge_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..255']}), is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='uint32', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_interface_ext_rpc', u'get-ip-interface', u'input']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'get-ip-interface', u'input']
def _get_interface_type(self):
"""
Getter method for interface_type, mapped from YANG variable /brocade_interface_ext_rpc/get_ip_interface/input/interface_type (enumeration)
YANG Description: The type of the interface. An 'unknown' type
represents error scenario and should not be used.
"""
return self.__interface_type
def _set_interface_type(self, v, load=False):
"""
Setter method for interface_type, mapped from YANG variable /brocade_interface_ext_rpc/get_ip_interface/input/interface_type (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_type() directly.
YANG Description: The type of the interface. An 'unknown' type
represents error scenario and should not be used.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'port-channel': {'value': 5}, u'loopback': {'value': 7}, u'fortygigabitethernet': {'value': 4}, u'unknown': {'value': 1}, u'gigabitethernet': {'value': 2}, u'tengigabitethernet': {'value': 3}, u'tunnel': {'value': 10}, u'hundredgigabitethernet': {'value': 9}, u'fibrechannel': {'value': 8}, u'l2vlan': {'value': 6}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"The type of the interface. An 'unknown' type \nrepresents error scenario and should not be used."}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_type must be of a type compatible with enumeration""",
'defined-type': "brocade-interface-ext:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'port-channel': {'value': 5}, u'loopback': {'value': 7}, u'fortygigabitethernet': {'value': 4}, u'unknown': {'value': 1}, u'gigabitethernet': {'value': 2}, u'tengigabitethernet': {'value': 3}, u'tunnel': {'value': 10}, u'hundredgigabitethernet': {'value': 9}, u'fibrechannel': {'value': 8}, u'l2vlan': {'value': 6}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"The type of the interface. An 'unknown' type \nrepresents error scenario and should not be used."}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)""",
})
self.__interface_type = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_type(self):
self.__interface_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'port-channel': {'value': 5}, u'loopback': {'value': 7}, u'fortygigabitethernet': {'value': 4}, u'unknown': {'value': 1}, u'gigabitethernet': {'value': 2}, u'tengigabitethernet': {'value': 3}, u'tunnel': {'value': 10}, u'hundredgigabitethernet': {'value': 9}, u'fibrechannel': {'value': 8}, u'l2vlan': {'value': 6}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"The type of the interface. An 'unknown' type \nrepresents error scenario and should not be used."}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
def _get_interface_name(self):
"""
Getter method for interface_name, mapped from YANG variable /brocade_interface_ext_rpc/get_ip_interface/input/interface_name (union)
YANG Description: The Interface value. The interface value is always
interpreted within the context of the value of
'interface-type' leaf:
interface-type interface-name
----------------- --------------------
gigabitethernet [rbridge-id]/slot/port
tengigabitethernet [rbridge-id]/slot/port
fortygigabitethernet [rbridge-id]/slot/port
hundredgigabitethernet [rbridge-id]/slot/port
port-channel Port channel ID
l2vlan Vlan ID
unknown Zero-length string.
The value of an 'interface-name' must always be
consistent with the value of the associated
'interface-type'. Attempts to set an interface-name
to a value inconsistent with the associated
'interface-type' must fail with an error.
"""
return self.__interface_name
def _set_interface_name(self, v, load=False):
"""
Setter method for interface_name, mapped from YANG variable /brocade_interface_ext_rpc/get_ip_interface/input/interface_name (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_name() directly.
YANG Description: The Interface value. The interface value is always
interpreted within the context of the value of
'interface-type' leaf:
interface-type interface-name
----------------- --------------------
gigabitethernet [rbridge-id]/slot/port
tengigabitethernet [rbridge-id]/slot/port
fortygigabitethernet [rbridge-id]/slot/port
hundredgigabitethernet [rbridge-id]/slot/port
port-channel Port channel ID
l2vlan Vlan ID
unknown Zero-length string.
The value of an 'interface-name' must always be
consistent with the value of the associated
'interface-type'. Attempts to set an interface-name
to a value inconsistent with the associated
'interface-type' must fail with an error.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-3][0-9])/)?(([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='union', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_name must be of a type compatible with union""",
'defined-type': "brocade-interface-ext:union",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-3][0-9])/)?(([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='union', is_config=True)""",
})
self.__interface_name = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_name(self):
self.__interface_name = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-3][0-9])/)?(([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='union', is_config=True)
def _get_rbridge_id(self):
"""
Getter method for rbridge_id, mapped from YANG variable /brocade_interface_ext_rpc/get_ip_interface/input/rbridge_id (uint32)
"""
return self.__rbridge_id
def _set_rbridge_id(self, v, load=False):
"""
Setter method for rbridge_id, mapped from YANG variable /brocade_interface_ext_rpc/get_ip_interface/input/rbridge_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_rbridge_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rbridge_id() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..255']}), is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rbridge_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..255']}), is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='uint32', is_config=True)""",
})
self.__rbridge_id = t
if hasattr(self, '_set'):
self._set()
def _unset_rbridge_id(self):
self.__rbridge_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..255']}), is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, choice=(u'request-type', u'get-request'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='uint32', is_config=True)
interface_type = __builtin__.property(_get_interface_type, _set_interface_type)
interface_name = __builtin__.property(_get_interface_name, _set_interface_name)
rbridge_id = __builtin__.property(_get_rbridge_id, _set_rbridge_id)
__choices__ = {u'request-type': {u'get-request': [u'interface_type', u'interface_name', u'rbridge_id']}}
_pyangbind_elements = {'interface_type': interface_type, 'interface_name': interface_name, 'rbridge_id': rbridge_id, }
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
25ef49581e0a4c60622b6f335484660e0b3c0bd4
|
20936bf43b550ff8514164738529d0f78b8d8c14
|
/Zadanie 7/circles.py
|
7e41df55d94ae9100961df4c0e29906de7c7e00f
|
[] |
no_license
|
kamilck13/Python2019
|
125f993324df5c37c4307be6db14d478de032678
|
88bcc231c662e29efd7a164ad3a6cc3c4f75b4a2
|
refs/heads/master
| 2020-08-21T19:58:35.515530
| 2020-02-12T19:40:44
| 2020-02-12T19:40:44
| 216,235,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,006
|
py
|
#7.5
from points import Point
import math
import unittest
class Circle:
def __init__(self, x, y, radius):
if radius < 0:
raise ValueError("promien ujemny")
self.pt = Point(x, y)
self.radius = radius
def __repr__(self):
return ("Circle("+ str(self.pt.x) + ", " + str(self.pt.y) + ", " + str(self.radius) + ")")
def __eq__(self, other):
return self.pt == other.pt and self.radius == other.radius
def __ne__(self, other):
return not self == other
def area(self):
return (math.pi * self.radius * self.radius)
def move(self, x, y):
self.pt.x += x
self.pt.y += y
def cover(self, other):
odlSrodk = math.sqrt(math.pow(other.pt.x - self.pt.x, 2) + math.pow(other.pt.y - self.pt.y, 2))
        if odlSrodk <= math.fabs(self.radius - other.radius):  # one circle lies inside the other
            if self.radius > other.radius:
                return self
            return other
        else:
            tmpX = (self.pt.x + other.pt.x)/2.  # midpoint of the segment joining the circle centers
            tmpY = (self.pt.y + other.pt.y)/2.
            tmp = math.sqrt(math.pow(other.pt.x - self.pt.x, 2) + math.pow(other.pt.y - self.pt.y, 2))  # length of the segment joining the centers
            tmpR = 0
            if self.radius > other.radius:
                tmpR = self.radius + (tmp/2.)  # radius of the covering circle
            else:
                tmpR = other.radius + (tmp/2.)
            return Circle(tmpX, tmpY, tmpR)
class TestCircle(unittest.TestCase):
def setUp(self):
self.tmp = Circle(1,2,3)
self.tmp2 = Circle(0,0,0)
def test_init(self):
self.assertEqual(self.tmp.pt, Point(1,2))
self.assertEqual(self.tmp.pt.x, 1)
self.assertEqual(self.tmp.pt.y, 2)
self.assertEqual(self.tmp.radius, 3)
with self.assertRaises(ValueError):
Circle(0,0,-1)
def test_repr(self):
self.assertEqual(repr(self.tmp), "Circle(1, 2, 3)")
def test_eq(self):
self.assertFalse(self.tmp == Circle(1,2,4))
self.assertTrue(self.tmp == Circle(1.0,2.0,3.0))
self.assertTrue(self.tmp == Circle(1,2,3))
def test_ne(self):
self.assertTrue(self.tmp != Circle(1,2,4))
self.assertFalse(self.tmp != Circle(1.0,2.0,3.0))
self.assertFalse(self.tmp != Circle(1,2,3))
def test_area(self):
self.assertEqual(self.tmp.area(), math.pi * pow(3, 2))
self.assertEqual(self.tmp2.area(), 0)
def test_move(self):
self.tmp.move(1,2)
self.assertEqual(self.tmp, Circle(2,4,3))
self.tmp2.move(1, 2)
self.assertEqual(self.tmp2, Circle(1,2,0))
def test_cover(self):
self.assertEqual(self.tmp.cover(self.tmp2), Circle(1,2,3))
self.assertEqual(self.tmp2.cover(self.tmp), Circle(1,2,3))
self.assertEqual(Circle(0,0,2).cover(Circle(4,0,2)), Circle(2, 0, 4))
def tearDown(self): pass
if __name__ == '__main__':
unittest.main()
|
[
"kamil.ck13@gmail.com"
] |
kamil.ck13@gmail.com
|
cd338d7743fc838918a1cfb2641b8d4f168ac96a
|
9ea625715d429597198a7b305575d673e159e06c
|
/ml_classifiers/ID3Tree.py
|
e9f6295427ebdc6842f868700e107a90d7648373
|
[] |
no_license
|
TheMikeste1/Neural_Network
|
0ab96223b4a5159ee5ca17cb9dc8087222e53a0d
|
cbed302bcc235ed69b9f9a7430fb68d0db6cc1b4
|
refs/heads/master
| 2021-02-07T20:45:34.574475
| 2020-03-01T04:51:25
| 2020-03-01T04:51:25
| 244,075,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,859
|
py
|
import numpy as np
import pandas as pd
from ml_classifiers.general import Classifier
class ID3Tree(Classifier):
def __init__(self):
self._tree = None
self._classes = []
self._default = None
self._depth = 0
self._size = 0
def get_size(self):
return self._size
def get_depth(self):
return self._depth
def fit(self, dataset, targets):
if self._tree is not None:
print("Warning! Overriding previous tree!")
self._tree, self._size = self.build_tree(pd.DataFrame.to_numpy(dataset), targets, dataset.columns.values)
        # fallback prediction; note that np.argmax selects the largest target
        # value, not the most frequent class
        self._default = targets[np.argmax(targets)]
for target in targets:
if target not in self._classes:
self._classes.append(target)
def predict(self, dataset):
predictions = np.zeros((0, 0))
for _, datapoint in dataset.iterrows():
path = []
prediction = self._tree
while prediction not in self._classes:
feature = list(prediction.keys())[0]
feature_data = datapoint[feature]
path += [feature] + [feature_data]
try:
prediction = prediction[path[-2]][path[-1]]
except KeyError: # This is in case the data for the feature has not been seen before
prediction = self._default
path += [prediction]
predictions = np.append(predictions, prediction)
return predictions
@staticmethod
def calculate_entropy(node):
return -node * np.log2(node) if node != 0 else 0
def calculate_info_gain(self, data, classes, feature):
"""
Calculates the total amount of information gained by calculating entropy for each datapoint received.
:param data: The received data (typically from a CSV file).
:param classes: The classes, targets, or categories, a datapoint might be.
:param feature: The index for the attribute we are calculating for.
:return: The amount of info gained, expressed as a float from 0 - 1, 1 being the best.
"""
data_len = data.shape[0]
# Get all possible values
values = []
for data_index in range(data_len):
if data[data_index][feature] not in values:
values.append(data[data_index][feature])
feature_counts = np.zeros(len(values))
entropy_amounts = np.zeros(len(values))
info_gain = 0
for value_index in range(len(values)):
# Get each datapoint's class with this value
datapoint_classes = []
for data_index in range(data_len):
if data[data_index][feature] == values[value_index]:
datapoint_classes.append(classes[data_index])
feature_counts[value_index] += 1
# Compress all the classes into a list of relevant classes
relevant_classes = []
for aclass in datapoint_classes:
if aclass not in relevant_classes:
relevant_classes.append(aclass)
            # Count how many instances of each class there are
class_count = np.zeros(len(relevant_classes))
for class_index in range(len(relevant_classes)):
for aclass in datapoint_classes:
if aclass == relevant_classes[class_index]:
class_count[class_index] += 1
# Calculate entropy for each class
for class_index in range(len(relevant_classes)):
entropy_amounts[value_index] += self.calculate_entropy(class_count[class_index] / sum(class_count))
# Add weighted entropy to info_gain
info_gain += feature_counts[value_index] * entropy_amounts[value_index] # / data_len
# Not used because it would
# be constant throughout the tree
return info_gain
def build_tree(self, data, classes, features, size=1, level=0):
if level > self._depth:
self._depth = level
# Only one class left
if len(np.unique(classes)) == 1:
return classes[0], size
default_class = classes[np.argmax(classes)]
data_size = len(data)
feature_size = len(features)
# Return default if we've reached the end
if data_size == 0 or feature_size == 0:
return default_class, size
# Create tree
# Figure out which feature will give us the most info
info_gain = np.zeros(feature_size)
for feature_index in range(feature_size):
gain = self.calculate_info_gain(data, classes, feature_index)
info_gain[feature_index] = gain
# Normally we subtract gain from 1 to give us the technical amount of info gained
# but since 1 is a constant we can just take the min instead.
best_feature = np.argmin(info_gain)
tree = {features[best_feature]: {}}
# Get all possible values
values = []
for data_index in range(len(data)):
if data[data_index][best_feature] not in values:
values.append(data[data_index][best_feature])
for value in values:
data_index = 0
new_data = np.zeros((0, feature_size - 1))
new_classes = np.zeros((0, 0))
new_features = np.zeros((0, 0))
for datapoint in data:
if datapoint[best_feature] == value:
if best_feature == 0:
new_datapoint = datapoint[1:]
new_features = features[1:]
                    elif best_feature == feature_size - 1:
new_datapoint = datapoint[:-1]
new_features = features[:-1]
else:
new_datapoint = datapoint[:best_feature]
new_datapoint = np.append(new_datapoint, datapoint[best_feature + 1:])
new_features = features[:best_feature]
new_features = np.append(new_features, features[best_feature + 1:])
new_data = np.vstack([new_data, new_datapoint])
new_classes = np.append(new_classes, classes[data_index])
data_index += 1
subtree, size = self.build_tree(new_data, new_classes, new_features, size + 1, level + 1)
tree[features[best_feature]][value] = subtree
return tree, size
# class ID3Tree
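# A minimal smoke test (not part of the original class; the toy data below is
# invented for illustration and assumes short categorical string features).
if __name__ == '__main__':
    frame = pd.DataFrame({'outlook': ['sunny', 'sunny', 'rain', 'rain'],
                          'windy': ['no', 'yes', 'no', 'yes']})
    labels = np.array(['play', 'play', 'play', 'stay'])
    tree = ID3Tree()
    tree.fit(frame, labels)
    print(tree.predict(frame))  # expected: ['play' 'play' 'play' 'stay']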
|
[
"michael.hegerhorst@gmail.com"
] |
michael.hegerhorst@gmail.com
|
c9116792258fccda7abc75c779a71016097f002a
|
97318679ea335cc22642a042c3c9bf4571d41496
|
/src/test/data/pa2/AdditionalTestCases_pa2/global_stmt_bind_var.py
|
288208de02a96a1e47dc4638c8d29a574ec94789
|
[
"BSD-2-Clause"
] |
permissive
|
Leo-Enrique-Wu/chocopy_compiler_semantic_analysis
|
d703190c520a11edb3052900672fa1aec465f635
|
e89f6434dd7d274d4838457316143f312226495f
|
refs/heads/main
| 2023-07-18T21:49:46.846377
| 2021-09-24T20:38:08
| 2021-09-24T20:38:08
| 410,095,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
def foo():
global x
global y
x = 1
y = "hello"
x:int = 2
y:str = ""
foo()
|
[
"hedgehogboy@gmail.com"
] |
hedgehogboy@gmail.com
|
5776dbdcae93997a6aad93c23c9a13444dbea7c4
|
6de76e0314b3dcaabcd68c9e3c0bf3c1d5b8d1a4
|
/Questao_02/Sistema_linear.py
|
83272bff0e672a40ff9ce634eccf01e5f52f4f20
|
[
"MIT"
] |
permissive
|
VictorBenoiston/Segundo_trabalho_calculo_numerico
|
511801959afae247aa680764142c6a472cbc6628
|
f5b88fc8bb56c1272e78d4b2f7e0ac9893d3b10f
|
refs/heads/main
| 2023-05-09T02:49:59.920663
| 2021-06-05T00:49:35
| 2021-06-05T00:49:35
| 373,928,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
from math import sqrt
# Setting up the system itself, we can see that c = 6.67
# From there, we are left with:
# Line 1: 36a+6b+c=17.33
# Line 2: 100a+10b+c=42.67
# Finding a and b by row reduction:
matriz = [[36, 6, 10.66], [100, 10, 36]]
# Pivot = 36
# mL2 = 100/36 = 25/9
# Computing the new row 2:
x = [36, 6, 10.66]
for c in range(0, 3):
elemento = matriz[1][c] - ((25/9) * matriz[0][c])
x.append(elemento)
# We now have b = -0.958 and a = 0.45569
def p2x(x):
resultado = (0.45569 * (x ** 2)) - 0.958 * x + 6.667
return resultado
def p2y(y):
# resultado = (0.45569 * (x ** 2)) - 0.958 * x + (6.667 - y)
a = 0.45569
b = -0.958
c = 6.667 - y
def raizes(a, b, c):
delta = b ** 2 - (4 * a * c)
raiz_delta = sqrt(delta)
return raiz_delta
raiz_delta = raizes(a, b, c)
x1 = ((b * (-1)) + raiz_delta) / (2 * a)
x2 = ((b * (-1)) - raiz_delta) / (2 * a)
    if x1 > 0:
        return x1
    elif x2 > 0:
        return x2
    else:
        print('invalid')
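# A quick check (added for illustration): the fitted polynomial should
# reproduce the original data points up to rounding of the coefficients.
assert abs(p2x(6) - 17.33) < 0.05
assert abs(p2x(10) - 42.67) < 0.05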
p2x_result = p2x(7)  # avoid shadowing the function name with its result
print('The resulting polynomial is: P2(x)=0.45569x²-0.958x+6.667')
print(f'On day 07, the sample reaches: {p2x_result:.2f}g')
print(f'Using inverse interpolation, the sample will reach the 10g mark at: x = {p2y(10):.2f}')
|
[
"victorbjo10@gmail.com"
] |
victorbjo10@gmail.com
|
2dfc00bb6a1f95ca7cf045c2248512aa733365ce
|
b714e75416a269bf2cfdbfec25d7ccf354d18668
|
/SuperMario/main.py
|
dcbcb7113df32d7563220df26093ad8b8c243fb4
|
[] |
no_license
|
TAODEI/python
|
d554e51514b24d61def45e9fe438fb1ac650dbb5
|
dcf326f74b009992028f81aa915adcc08ccc186e
|
refs/heads/main
| 2023-04-24T11:58:13.171861
| 2021-05-15T04:02:53
| 2021-05-15T04:02:53
| 360,071,538
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
import pygame
from source import tools, setup
from source.states import main_menu, load_screen, level
def main():
state_dict = {
'main_menu': main_menu.MainMenu(),
'load_screen': load_screen.LoadScreen(),
'level': level.Level(),
'game_over': load_screen.GameOver()
    }  # state dictionary controlling the game phases
game = tools.Game(state_dict, 'main_menu')
#state = main_menu.MainMenu()
#state = load_screen.LoadScreen()
#state = level.Level()
game.run()
if __name__ == '__main__':
    main()
|
[
"864978550@qq.com"
] |
864978550@qq.com
|
f505b1326929117fab2dc0b55e689f9a23bf3cc1
|
5e392947d0f396890bed8afee0a562763963c058
|
/Curso_Python/Modulo1/Condições/ex033.py
|
02f856361aca3f07fdfdccdcf10ab8a967c9c9c0
|
[] |
no_license
|
rdghenrique94/Estudos_Python
|
c2c56cb95ab6be67279938c58efb0baa0d2424da
|
f563313c55a933c1c4bfdf2edba1ba1e5aecbd28
|
refs/heads/master
| 2022-12-15T00:37:23.342136
| 2020-09-18T14:05:38
| 2020-09-18T14:05:38
| 294,685,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
def main():
num1 = int(input("Digite um numero: "))
num2 = int(input("Digite um numero: "))
num3 = int(input("Digite um numero: "))
if num1 > num2 and num1 > num3:
print("O número {} é o maior".format(num1))
elif num2 > num1 and num2 > num3:
print("O número {} é o maior".format(num2))
elif num3 > num1 and num3 > num2:
print("O número {} é o maior".format(num3))
if num1 < num2 and num1 < num3:
print("O número {} é o menor".format(num1))
elif num2 < num1 and num2 < num3:
print("O número {} é o menor".format(num2))
elif num3 < num1 and num3 < num2:
print("O número {} é o menor".format(num3))
if num1 == num2 or num1 == num3 and num2 == num1 or num2 == num3 and num3 == num1 or num3 == num2:
print("Os outros Numeros são Iguais")
main()
|
[
"rdghenrique94@gmail.com"
] |
rdghenrique94@gmail.com
|
8212fe8f37c233da19c1d6dc32dd8c73b2499923
|
0453965188c1bf49e3176426b2e3077bb6140587
|
/django_project/tenant_control/migrations/0002_create_public_tenant.py
|
41bb31fc90cdaa10bee90354053beceb5f5e3a32
|
[
"MIT"
] |
permissive
|
manishhub9/django-tenant-schemas-example
|
047a30c57ffabe20cc6746684d6462aa337fab28
|
ee7deea22665f395c5fadd9e03e4cd51a5c97230
|
refs/heads/master
| 2020-04-14T09:37:20.307930
| 2018-02-10T01:38:52
| 2018-02-10T01:38:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
# Generated by Django 2.0.2 on 2018-02-04 03:47
from django.db import migrations
from ..models import Company
def create_public_tenant(apps, schema_editor):
tenant = Company(
domain_url='localhost',
schema_name='public',
name='Public Tenant',
)
tenant.save()
class Migration(migrations.Migration):
dependencies = [
('tenant_control', '0001_initial'),
]
operations = [
migrations.RunPython(create_public_tenant)
]
|
[
"victor_o_silva@hotmail.com"
] |
victor_o_silva@hotmail.com
|
d104593830d76d1aaf642c47f48d2a6f62c23e01
|
53c4fe8ca6e11d130ef013fd1d9d62bb15eefa54
|
/app/auth/views.py
|
80eb2c0d6a64501cd7b77b93f8037f906f56031e
|
[] |
no_license
|
youhaowei/flask-starter
|
40fc4d31a588c8ff2b562e8297d86ec86bb72c0f
|
9f569894c30f3b159c328cf3ffcbd6cd8ae15f02
|
refs/heads/master
| 2021-01-21T03:30:09.517766
| 2016-09-19T22:59:01
| 2016-09-19T22:59:01
| 68,652,906
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,955
|
py
|
from flask import render_template, redirect, request, url_for
from ..email import send_email
from app import flash, db
from flask_login import (
login_user, login_required, logout_user, current_user
)
from . import auth
from app.models.user import User, User_Profile
from .forms import LoginForm, RegisterForm
from flask_babel import gettext as _
@auth.before_request
def before_request():
if current_user.is_authenticated:
current_user.ping()
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email_or_username.data).first()
if user is None:
user = User.query.filter_by(
username=form.email_or_username.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash(_('Invalid user name or password'), 'd')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.', 's')
return redirect(request.args.get('next') or url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data,
password=form.password.data)
profile = User_Profile(id=user.id)
db.session.add(user)
db.session.add(profile)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash(_("A confirmation email has been sent to you by email."), "s")
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash(_("You have confirmed your account. Thanks!"), 's')
else:
        flash(
            _("The confirmation link is invalid or has expired. "
              "Click <a href='%(url)s'>here</a> to resend the link.",
              url=url_for('auth.resend_confirmation')), 'd')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, "Confirm Your Account",
'auth/email/confirm', user=current_user, token=token)
flash(_("A confirmation email has been sent to you by email."), "s")
return redirect(request.args.get('next') or url_for('main.index'))
|
[
"youhaowei@email.arizona.edu"
] |
youhaowei@email.arizona.edu
|
88022e5cb1e07c52fdaee590a1bc55e811b3b7f4
|
068a694df077db7c1678dd494572b757a100b96c
|
/pic_gen.py
|
065f27d4396c26e645f3390f0f9d7f9cc2837ad7
|
[] |
no_license
|
liangcyn/a_puzzle_for_you
|
e51b925969ebcc371142c67c4013c11eda250299
|
e99a5a15291101103cf3159783c898c3027f53a3
|
refs/heads/master
| 2021-01-09T18:51:55.367400
| 2020-02-22T23:20:55
| 2020-02-22T23:20:55
| 242,416,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
import numpy
from PIL import Image
def convert_string_to_color(s):
ls = []
for c in s:
ls.append(ord(c))
return ls
result = convert_string_to_color("a b c hi tom! welcome to the first round of riddles. your code to go on is: brannerdining. send me the code to move onto the next round.")
data = numpy.zeros((1, 1000, 3), dtype=numpy.uint8)
for i in range(len(result)):
data[0,i] = [result[i], 0, 0]
image = Image.fromarray(data)
image.save("lets_start_at_the_very_beginning.png")
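# A minimal decoding sketch (added for illustration): read the red channel
# back from the generated image and rebuild the message, skipping the
# zero-padded tail of the row.
pixels = numpy.asarray(Image.open("lets_start_at_the_very_beginning.png"))[0]
decoded = ''.join(chr(r) for r, g, b in pixels if r != 0)
print(decoded)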
|
[
"noreply@github.com"
] |
liangcyn.noreply@github.com
|
93ce57e5835e7b6413df8b239027e71104e8a5ad
|
3ed50263057c1695330009f9f5b122e412e1c02f
|
/test/bn/test_relational.py
|
383de69cc7afbedbe3b6b2fab8d7ec6777fae6ab
|
[
"MIT"
] |
permissive
|
ppijbb/PyOpenDial
|
5528aa584190dcf08b892ec92a5ce8c2b82eb845
|
c9bca653c18ccc082dc8b86b4a8feee9ed00a75b
|
refs/heads/master
| 2022-02-16T01:27:39.667661
| 2019-07-24T10:51:41
| 2019-07-24T10:51:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
from bn.values.value_factory import ValueFactory
from datastructs.assignment import Assignment
from dialogue_system import DialogueSystem
from readers.xml_domain_reader import XMLDomainReader
from templates.relational_template import RelationalTemplate
def word_cnt_func(*value):
if len(value) != 1:
raise ValueError()
str_val = value[0]
return ValueFactory.create([len(str_val), len(str_val.split(' '))])
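# For example (illustrative): word_cnt_func("hello world") yields the value
# [11, 2] -- the character count followed by the whitespace-separated word count.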
class TestRelation:
def test_relational(self):
rel = ValueFactory.create("[sees|tag:VB subject>John object>Anne instrument>[telescope|tag:NN colour>red|tag:ADJ]]")
assert len(rel) == 5
assert ValueFactory.create("telescope") in rel.get_sub_values()
assert str(rel.get_nodes()[0].get_content()) == "sees"
t = RelationalTemplate("[sees subject>John]")
assert len(t.get_matches(rel)) == 1
t = RelationalTemplate("[sees {S}>John]")
assert len(t.get_matches(rel)) == 1
assert str(t.get_matches(rel)[0].get_value("S")) == "subject"
t = RelationalTemplate("[sees {S}>{O}]")
assert len(t.get_matches(rel)) == 3
assert str(t.get_matches(rel)[0].get_value("S")) == "instrument"
assert str(t.get_matches(rel)[0].get_value("O")) == "telescope"
t = RelationalTemplate("[{V}|tag:{T} subject>{X} object>{Y}]")
assert str(t.get_matches(rel)[0].get_value("V")) == "sees"
assert str(t.get_matches(rel)[0].get_value("T")) == "VB"
assert str(t.get_matches(rel)[0].get_value("X")) == "John"
assert str(t.get_matches(rel)[0].get_value("Y")) == "Anne"
t = RelationalTemplate("[sees +>red|tag:{X}]")
assert len(t.get_matches(rel)) == 1
assert str(t.get_matches(rel)[0].get_value("X")) == "ADJ"
rel2 = ValueFactory.create("[sees|tag:VB object>Anne instrument>[telescope|tag:NN colour>red|tag:ADJ] subject>John]")
assert rel2 == rel
assert hash(rel2) == hash(rel)
assert ValueFactory.create("Anne") in rel2
t = RelationalTemplate("[sees {S}>John]")
assert len(t.get_slots()) == 1
assert t.fill_slots(Assignment("S", "subject")) == "[sees subject>John]"
def test_function(self):
d = XMLDomainReader.extract_domain("test/data/relationaltest.xml")
system = DialogueSystem(d)
system.get_settings().show_gui = False
system.start_system()
|
[
"jys5609@gmail.com"
] |
jys5609@gmail.com
|
d36f47941ddd550e4e5cc68b142177bd27725359
|
f37dcae9222b2244f23611dffc92e38d7e8fefec
|
/list/Todolist/models.py
|
e06f927d378113186198dd39342a7ba4b0b696cf
|
[] |
no_license
|
Watotacho/Todo-list-Django
|
2a653d2751f8dc1b62f42b3e7d847e4807bbd4c4
|
0c084849cb34c2806842412f127baf1268da997e
|
refs/heads/main
| 2023-06-29T12:06:51.100754
| 2021-08-12T17:41:02
| 2021-08-12T17:41:02
| 395,207,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
from django.db import models
class Task(models.Model):
    title = models.CharField(max_length=200)
    complete = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    due = models.DateTimeField(auto_now_add=False, auto_now=False, blank=True, null=True)
    def __str__(self):
        return self.title
# Create your models here.
|
[
"watotacho@gmail.com"
] |
watotacho@gmail.com
|
af5f354f81ae0c08ae4f4a0c229334d3108f2599
|
491ff318cb13f63d300a0602ee733b485b005987
|
/optometria/models.py
|
103a39d84bb53182173d3fc60e62c114fb3f710a
|
[
"MIT"
] |
permissive
|
radianx/curso_python_polotic
|
d982c4c9a4dc8058af54cb2d5d3c7fff02723adb
|
a6184dbbc1bcedd6733589afca634360a32e2c0e
|
refs/heads/main
| 2023-01-22T12:33:54.004600
| 2020-12-02T22:47:45
| 2020-12-02T22:47:45
| 317,708,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,517
|
py
|
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.db import models
# Create your models here.
class Producto(models.Model):
nombre = models.CharField(max_length=200)
tipo = models.CharField(max_length=200)
precio = models.DecimalField(
max_digits=8,
decimal_places=2
)
LEJOS = 'LE'
CERCA = 'CE'
DISTANCIA_CHOICES = [
(LEJOS, "Lejos"),
(CERCA, "Cerca")
]
IZQUIERDA = 'IQ'
DERECHA = 'DR'
LADO_CHOICES = [
(IZQUIERDA, "Izquierda"),
(DERECHA, "Derecha")
]
distancia = models.CharField(
max_length=2,
choices=DISTANCIA_CHOICES,
default=LEJOS
)
lado = models.CharField(
max_length=2,
choices=LADO_CHOICES,
default=DERECHA
)
CON_ARMAZON = 'CA'
SIN_ARMAZON = 'SA'
ARMAZON_CHOICES = [
(CON_ARMAZON, "Si"),
(SIN_ARMAZON, "No")
]
armazon = models.CharField(
max_length=2,
choices=ARMAZON_CHOICES,
default=CON_ARMAZON
)
def __str__(self):
return self.nombre
class Pedido(models.Model):
fechaDePedido = models.DateTimeField()
montoTotal = models.DecimalField(max_digits=19, decimal_places=4)
vendedor = models.ForeignKey('Vendedor', null=True, on_delete=models.SET_NULL)
paciente = models.ForeignKey('Paciente', null=True, on_delete=models.SET_NULL)
CREDITO = 'CR'
DEBITO = 'DB'
VIRTUAL = 'VT'
EFECTIVO = 'EF'
MONEY_CHOICES = [
(CREDITO, "Tarjeta de Credito"),
(DEBITO, "Tarjeta de Debito"),
(VIRTUAL, "Billetera Virtual"),
(EFECTIVO, "Efectivo")
]
tipoDePago = models.CharField(
max_length=2,
choices=MONEY_CHOICES,
default=EFECTIVO
)
PENDIENTE = 'PT'
PEDIDO = 'PD'
TALLER = 'TL'
FINALIZADO = 'FN'
ESTADOS_DE_PEDIDO = [
(PENDIENTE, "Pendiente"),
(PEDIDO, "Pedido"),
(TALLER, "Enviado a Taller"),
(FINALIZADO, "Finalizado")
]
estado = models.CharField(
max_length=2,
choices=ESTADOS_DE_PEDIDO,
default=PENDIENTE
)
class ProductoPedido(models.Model):
producto = models.ForeignKey(Producto, null=True, on_delete=models.SET_NULL)
cantidad = models.IntegerField(default=1)
pedido = models.ForeignKey(Pedido, null=True, on_delete=models.SET_NULL)
def getSubTotal(self):
return self.producto.precio * self.cantidad
class Paciente(models.Model):
dni = models.BigIntegerField()
email = models.CharField(max_length=200)
telefono = models.CharField(max_length=15)
nombre = models.CharField(max_length=200)
def __str__(self):
return self.nombre
class Turno(models.Model):
fechaDeTurno = models.DateTimeField()
paciente = models.ForeignKey('Paciente', on_delete=models.CASCADE)
secretaria = models.ForeignKey('Secretaria', on_delete=models.CASCADE)
class HistorialMedico(models.Model):
paciente = models.ForeignKey('Paciente', on_delete=models.CASCADE)
turno = models.ForeignKey('Turno', on_delete=models.CASCADE)
observaciones = models.CharField(max_length=500)
personalMedico = models.ForeignKey('PersonalMedico', on_delete=models.CASCADE)
class PersonalMedico(models.Model):
usuario = models.OneToOneField('Usuario', on_delete=models.CASCADE, primary_key=True)
def __str__(self):
return self.usuario.nombre + " " + self.usuario.apellido
class Secretaria(models.Model):
usuario = models.OneToOneField('Usuario', on_delete=models.CASCADE, primary_key=True)
def __str__(self):
return self.usuario.nombre + " " + self.usuario.apellido
class Vendedor(models.Model):
usuario = models.OneToOneField('Usuario', on_delete=models.CASCADE, primary_key=True)
def __str__(self):
return self.usuario.nombre + " " + self.usuario.apellido
class Tecnico(models.Model):
usuario = models.OneToOneField('Usuario', on_delete=models.CASCADE, primary_key=True)
def __str__(self):
return self.usuario.nombre + " " + self.usuario.apellido
class Gerente(models.Model):
usuario = models.OneToOneField('Usuario', on_delete=models.CASCADE, primary_key=True)
def __str__(self):
return self.usuario.nombre + " " + self.usuario.apellido
class UsuarioManager(BaseUserManager):
def create_user(self, email, name, lastname, phone, date_of_birth, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
nombre=name,
apellido=lastname,
telefono=phone,
fecha_nac=date_of_birth,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, lastname, phone, date_of_birth, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.create_user(
email=self.normalize_email(email),
nombre=name,
apellido=lastname,
telefono=phone,
fecha_nac=date_of_birth,
)
user.set_password(password)
user.is_admin = True
user.save(using=self._db)
return user
class Usuario(AbstractUser):
first_name = None
last_name = None
username = models.CharField(max_length=32)
nombre = models.CharField(max_length=200)
password = models.CharField(max_length=256)
email = models.CharField(max_length=200, unique=True, primary_key=True)
apellido = models.CharField(max_length=200)
telefono = models.CharField(max_length=200)
fecha_nac = models.DateTimeField(default=None, blank=True, null=True)
    # NOTE: plain class attribute rather than a model field; create_superuser
    # sets it per instance but the flag is never stored in the database
    is_admin = False
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['nombre', 'apellido', 'username']
def __str__(self):
return self.email
def has_related_object(self, related):
return hasattr(self, related)
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
|
[
"sbardemadrian@gmail.com"
] |
sbardemadrian@gmail.com
|
bcfc97326b9d092814bd302529cd0dad2d2ec43b
|
ae8121b55decd79e2f9721ed642dc4ffab862256
|
/src/apps/even/admin/__init__.py
|
382772d13fa13e5057e1e2b535af622532bbebf7
|
[] |
no_license
|
jaimescose/eventuality
|
0fc6b6cdd0ec5ef8bde1339285d0b722e4912a4d
|
8419c1f1aef81646df3068faec71d3b39e138af0
|
refs/heads/master
| 2022-12-16T18:14:31.675712
| 2020-09-12T00:11:58
| 2020-09-12T00:11:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# event admins
from .eventAdmin import EventAdmin
from .eventPromoterAdmin import EventPromoterAdmin
from .eventCategoryAdmin import EventCategoryAdmin
# user admins
from .userProfileAdmin import CustomUserAdmin
|
[
"jjescobar@uninorte.edu.co"
] |
jjescobar@uninorte.edu.co
|
a2e0baaab82c1c74f95def6868ecd4bd3fb23848
|
5cb69a4597c2b1b15bbd752520d2b7edc18a5ab4
|
/deye/deye/wsgi.py
|
d6c413db4e31966cd367616acda7cc73135186d5
|
[] |
no_license
|
shashanksbelvadi/deye-website-1
|
31a547d72969a487f8c4e4fe2ad6ef5226843154
|
50a7e760de707d7ba7731b76dca6d6abcc747412
|
refs/heads/master
| 2021-06-02T04:43:12.743392
| 2016-06-20T04:22:16
| 2016-06-20T04:22:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
WSGI config for deye project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "deye.settings")
application = get_wsgi_application()
|
[
"sbelvadi@salesforce.com"
] |
sbelvadi@salesforce.com
|
b7edf6dc0141012cd8f62de7e378396f1e0739a9
|
4ff9b48e7366bb20550260d1fe442a91141fc30b
|
/blogengine/blogengine/urls.py
|
503d4eec7d5a2ab4c4a9647d09ec1b3c5b8c6cb6
|
[] |
no_license
|
RowdyKGZ/oleg_blog_tutorial
|
d50dfee236e87688d51e88b681fdbe03da476b91
|
cb9b510cdde9fa3499b55532c684319f57ce7708
|
refs/heads/master
| 2023-02-02T23:25:38.874971
| 2020-12-25T10:40:11
| 2020-12-25T10:40:11
| 324,225,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
from django.contrib import admin
from django.urls import path, include
from .views import redirect_blog
urlpatterns = [
path('', redirect_blog),
path('admin/', admin.site.urls),
path('blog/', include('blog.urls')),
]
|
[
"RowdyKG@gmail.com"
] |
RowdyKG@gmail.com
|
ea2016e25fa5a17854aaf007a2ee143c0c653836
|
51a902289b357ad6cf467e51c9740fa2a07c1c1c
|
/first_sem/lab_8/protect8.py
|
e0d63133936edc813db231c92d4d6956d9a1dd5a
|
[] |
no_license
|
ivaaahn/bmstu-python
|
d3910356353b0ab3faab243a53a1d657b7d793ad
|
9f2d8eb390d804ad86545c992add2a6d97148a5d
|
refs/heads/main
| 2023-03-24T08:52:12.723117
| 2021-03-16T13:19:46
| 2021-03-16T13:19:46
| 348,353,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
def f(x: float) -> float:
return x*x*x
def F(x: float) -> float:
return x*x*x*x/4
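# Right Riemann sum: the integral over [a, b] is approximated by
# h * (f(a + h) + f(a + 2h) + ... + f(b)), with h = (b - a) / nseg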
def right(a, b, nseg):
h = (b - a) / nseg
ans = f(b)
for i in range(1, nseg):
ans += f(a + i * h)
ans *= h
return ans
a = float(input('Enter a: '))
b = float(input('Enter b: '))
N = int(input('Enter the number of partitions N: '))
answer = right(a, b, N)
print('The integral equals: {}'.format(answer))
|
[
"ivahnencko01@gmail.com"
] |
ivahnencko01@gmail.com
|
2d209e77a3b25cc9a15943f774ba376252af4916
|
3f161cdb8275c607f0c293a868f9fa830d23b8d4
|
/usecase_management_app/admin.py
|
365cde6b849f474f7bde8e9661e8ca39e3748e65
|
[] |
no_license
|
williamyang900211/usecase
|
1324e98eb2a27f6f1eddcce1099113ce52444169
|
ad186445bce30e4b12025b371ea57c2ae9cbc80e
|
refs/heads/master
| 2020-04-14T08:56:29.864143
| 2019-01-02T09:05:37
| 2019-01-02T09:05:37
| 163,747,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
from django.contrib import admin
from .models import test_bill,use_case
# Register your models here.
admin.site.register(test_bill)
admin.site.register(use_case)
|
[
"williamyang900211@users.noreply.github.com"
] |
williamyang900211@users.noreply.github.com
|
dad6b23b33058eb36bc42a8e3af0ca2ff9dbea45
|
3e2d291b85f4012876fc449762d65247adcd89fb
|
/exe/amii_tf_nn_mnist_example
|
8902a436aec4d54c87862764066de7549fd8b8fd
|
[
"MIT"
] |
permissive
|
AmiiThinks/amii-tf-nn
|
82c916ebfd179d8303454d4da73d8ce5ececae48
|
73adb7bb150d41581315ce2d02a8617f85cbb571
|
refs/heads/master
| 2021-05-23T06:07:58.800160
| 2018-03-21T02:09:45
| 2018-03-21T02:09:45
| 94,800,250
| 1
| 1
| null | 2017-07-03T23:24:22
| 2017-06-19T16:57:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,100
|
#!/usr/bin/env python
import os
import tensorflow as tf
from tensorflow.python.layers.core import Dense as DenseLayer
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
from amii_tf_nn.data import Data, BatchedData
from amii_tf_nn.data_set import DataSet
from amii_tf_nn.experiment import TensorboardExperiment
from amii_tf_nn.classifier import CrossEntropyClassifer
from amii_tf_nn.network_model import NetworkModel
from amii_tf_nn.layer import Layer
from amii_tf_nn.trainer import EvalTrainer
class AdamCrossEntropyClassifer(CrossEntropyClassifer):
def _create_evals(self):
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(
tf.argmax(self.model.post_activation(), 1),
tf.argmax(self.target_node, 1)
)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.name_scope('L2_distance'):
distance = 1 / 2.0 * tf.reduce_mean(
tf.square(self.model.post_activation() - self.target_node)
)
return {'accuracy': acc, 'L2_distance': distance}
def _create_optimizer(self, surrogate_eval_node):
with tf.name_scope('adam_training'):
node = tf.train.AdamOptimizer(
**self.optimization_params
).minimize(surrogate_eval_node)
return node
def mnist_data():
mnist = input_data.read_data_sets("tmp/MNIST_data/", one_hot=True)
return DataSet(
training=Data(mnist.train.images, mnist.train.labels),
validation=Data(
mnist.validation.images,
mnist.validation.labels
),
testing=Data(mnist.test.images, mnist.test.labels)
)
def batched_mnist_data(batch_size):
mnist = mnist_data()
for k in mnist.keys():
mnist[k] = BatchedData.from_data(mnist[k], batch_size=batch_size)
return mnist['training'], mnist
def main():
experiment = TensorboardExperiment(
'amii_tf_nn_mnist_example',
root=os.path.join(os.getcwd(), 'tmp'),
seed=1,
tag='1',
log_level=tf.logging.INFO
)
experiment.ensure_present()
training_data, eval_data = batched_mnist_data(100)
input_node = tf.placeholder(
tf.float32,
shape=(None, training_data.num_features()),
name="input"
)
target_node = tf.placeholder(
tf.float32,
shape=(None, training_data.num_outputs()),
name='target'
)
hidden = 1024
adln = AdamCrossEntropyClassifer(
NetworkModel.factory(
'adln',
input_node,
Layer.factory(
DenseLayer(
hidden,
use_bias=True,
name='layer_1'
),
activation=tf.nn.relu
),
Layer.factory(
DenseLayer(
training_data.num_outputs(),
use_bias=True,
name='layer_2'
),
activation=tf.nn.softmax
)
),
'AdamDoubleLayerFeedForward',
target_node
)
asln = AdamCrossEntropyClassifer(
NetworkModel.factory(
'asln',
input_node,
Layer.factory(
DenseLayer(
training_data.num_outputs(),
use_bias=True,
name='layer'
),
activation=tf.nn.softmax
)
),
'AdamSingleLayerFeedForward',
target_node
)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf.summary.FileWriter(experiment.path(), sess.graph)
EvalTrainer(
experiment.path(),
eval_data,
sess,
training_data,
adln,
asln,
epochs_between_evaluations=5,
batches_per_epoch=2
).run()
if __name__ == '__main__': main()
|
[
"dmorrill10@gmail.com"
] |
dmorrill10@gmail.com
|
|
79fe7ebdbfed2be18858dfee9f9d33a57a7713ef
|
b76c2dabb9580f7638f496521ac030071b090bbe
|
/water_app/views.py
|
bdc9a70126c74fe2036de3eec6575942a6c542b3
|
[] |
no_license
|
sathishkumarkandaswamy/water_project
|
44af709281348889f0c78a72b2a396422989e3b3
|
0b0f5a74189e2f08f3bc7222c46f3994149960de
|
refs/heads/master
| 2020-05-02T19:16:19.215882
| 2019-04-03T05:32:59
| 2019-04-03T05:32:59
| 178,154,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
from django.shortcuts import render, redirect
from water_app.forms import WaterParameterForm
from water_app.models import WaterParameter
def index(request):
data = 'Samples'
sample_list = WaterParameter.objects.values('id', 'title', 'description')
return render(request, 'water_app/index.html', {'data': data, 'sample_list': sample_list})
def water_add(request):
'''
Add water Parameter
'''
page = 'water_app/add.html'
message = ""
if request.method == "POST":
form = WaterParameterForm(request.POST)
if form.is_valid():
try:
form.save()
message = "Parameter submitted successfully"
return redirect('')
except:
pass
else:
form = WaterParameterForm()
return render(request, page, {'form':form, 'message': message} )
def dashboard(request, sample_id=None):
'''
Dashboard
'''
page = 'water_app/dashboard.html'
message = ""
data = ""
print(sample_id)
data = WaterParameter.objects.get(id=sample_id)
return render(request, page, {'data': data})
|
[
"sathishkumar.kswamy@gmail.com"
] |
sathishkumar.kswamy@gmail.com
|
51f4b150c71f47caec71513c3efe1e9d6f859d1e
|
2726f4f6a4121c1b36fad303ad0ee507aa813ad5
|
/dev/tries_checks/checks/WFalignment_FD.py
|
875197c10865167cbb66f40e856a92c42fa87595
|
[
"CC-BY-4.0"
] |
permissive
|
stefanoschmidt1995/MLGW
|
3eb63a6682a241856f0485594231c907b2f23359
|
a786e9ce5845ba1f82980c5265307914c3c26e68
|
refs/heads/master
| 2023-09-04T04:59:06.392654
| 2023-08-29T15:56:23
| 2023-08-29T15:56:23
| 221,709,165
| 12
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,982
|
py
|
import numpy as np
import lal
import lalsimulation as lalsim
#export LAL_DATA_PATH=/home/stefano/Documents/Stefano/scuola/uni/tesi_magistrale/code/data_ROM/
def align_ph(wf):
amp = np.abs(wf)
ph = np.unwrap(np.angle(wf))
ph = ph - ph[0]
return amp*np.exp(1j*ph)
def generate_waveform(m1,m2):
mtot = (m1+m2)*lal.MTSUN_SI
f_min = 20.0
f_max = 2048.0
df = 1./32.
f_rescaled_min = f_min*mtot
f_rescaled_max = f_max*mtot
df_rescaled = mtot*df
hptilde, hctilde = lalsim.SimInspiralChooseFDWaveform( #where is its definition and documentation????
m1*lalsim.lal.MSUN_SI, #m1
m2*lalsim.lal.MSUN_SI, #m2
0., 0., .5, #spin vector 1
0., 0., 0., #spin vector 2
1.*1e6*lalsim.lal.PC_SI, #distance to source
0., #inclination
0., #phi ref
0., #longAscNodes
0., #eccentricity
0., #meanPerAno
1e-3, # frequency incremental step
f_min, # lowest value of frequency
f_max, # highest value of frequency
f_min, #some reference value of frequency (??)
lal.CreateDict(), #some lal dictionary
# lalsim.GetApproximantFromString('IMRPHenomPv2') #approx method for the model
lalsim.GetApproximantFromString('SEOBNRv4_ROM') #approx method for the model
)
frequency = np.linspace(0.0, f_max, hptilde.data.length)
rescaled_frequency = frequency*mtot
print(mtot)
return frequency, rescaled_frequency, hptilde.data.data+1j*hctilde.data.data
q = 15.
m1 = 5.0
m1c = (m1*q*m1)**(3./5.)/(m1+m1*q)**(1./5.)
m2 = 15.0
m2c = (m2*q*m2)**(3./5.)/(m2+m2*q)**(1./5.)
m1tot = (1+q)*m1
m2tot = (1+q)*m2
f1,fr1,wf1 = generate_waveform(m1,m1)
f2,fr2,wf2 = generate_waveform(m2,m2)
#wf2 = np.interp(fr1,fr2,wf1)
wf1 = align_ph(wf1)
wf2 = align_ph(wf2)
amp1= np.abs(wf1)
amp2= np.abs(wf2)
ph1 = np.unwrap(np.angle(wf1))
ph2 = np.unwrap(np.angle(wf2))
#wf3 = (m1c/m2c)**(-2./6.)*np.interp(f1,f1*m1/m2,wf1)*m2/m1
#wf3 = m2/m1*np.interp(fr2, fr1, wf1)
#phi = np.interp(f1/m2, f1/m1, phi)
#wf3 = np.interp(f2, f1/m2, wf3)
print(amp1,amp2)
# Newtonian chirp time: approximate seconds to coalescence from frequency f
# for chirp mass mc, tau ~ 2.18 s * (1.21 Msun/mc)^(5/3) * (100 Hz/f)^(8/3)
t1 = 2.18 * (1.21/m1c)**(5./3.) * (100/f1[np.nonzero(amp1)[0][0]])**(8./3.)
t2 = 2.18 * (1.21/m2c)**(5./3.) * (100/f2[np.nonzero(amp2)[0][0]])**(8./3.)
#print(t1,t2)
import matplotlib.pyplot as plt
fig = plt.figure()
plt.title('ph')
ax = fig.add_subplot(111)
#ax.plot(fr1, np.unwrap(np.angle(wf1*np.exp(-1j*2*np.pi*f1*t1))).real, color='b')
#ax.plot(fr2, np.unwrap(np.angle(wf2*np.exp(-1j*2*np.pi*f2*t2))).real, color='k')
ax.plot(fr1, np.unwrap(np.angle(wf1)), color='b')
ax.plot(fr2, np.unwrap(np.angle(wf2)), color='k')
fig = plt.figure()
plt.title('amp')
ax = fig.add_subplot(111)
ax.plot(fr1, np.abs(wf1), color='b')
ax.plot(fr2, np.abs(wf2), color='k')
#ax.plot(fr2, wf3, color='r')
plt.show()
quit()
fig = plt.figure()
plt.title('interpolated prediction')
ax = fig.add_subplot(111)
ax.plot(f2, wf2, color = 'k')
ax.plot(f2, wf3, color = 'red')
plt.show()
|
[
"stefanoschmidt1995@gmail.com"
] |
stefanoschmidt1995@gmail.com
|
2810cc60e0e0239a6ea7d016792eb16474eb9bca
|
e3f02b98701a11d8de2dbb495ef729bdd88189c2
|
/scripts/HebbNet.py
|
63c21b407c8df012059e48df5b43477f8ec6b5d9
|
[] |
no_license
|
RoncoLuis/artificial_neural_net
|
a315068e70a2fac22555df5fa74fe7fb60512c5c
|
f389591a7244a1ebb744248a62b65d4bad4c8102
|
refs/heads/master
| 2020-09-06T20:04:02.787909
| 2019-12-28T00:54:03
| 2019-12-28T00:54:03
| 220,535,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
"""
Neural net. Hebb Rule
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def step_function(x):
return 1 if x >= 0 else -1
def perceptron_output(weights,bias,x):
calculation = np.dot(weights,x)+bias
return step_function(calculation)
#data = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])
#target = np.array([])
# truth table for the AND gate (bipolar encoding)
tt_and = {
"x1":[-1,-1,1,1],
"x2":[-1,1,-1,1],
"target":[-1,-1,-1,1]
}
tt_and = pd.DataFrame(tt_and)
data = tt_and.columns.tolist()[:-1]
target = tt_and.columns.tolist()[-1]
X = np.array(tt_and[data])
y = np.array(tt_and[target])
# propagation function
print(X.T,y)
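# A minimal sketch (not in the original file) of the Hebb update this data is
# prepared for: for each sample, w += x * t and b += t (bipolar targets).
weights = np.zeros(X.shape[1])
bias = 0.0
for x_i, t in zip(X, y):
    weights = weights + x_i * t
    bias = bias + t
print('learned weights:', weights, 'bias:', bias)
# Check the learned AND gate on all four inputs
for x_i, t in zip(X, y):
    print(x_i, '->', perceptron_output(weights, bias, x_i), '(target:', t, ')')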
|
[
"w580ix@gmail.com"
] |
w580ix@gmail.com
|
b84503e8fadbd67290f101438ea24c0504bcc8c3
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnn1082.py
|
b0521cb60dab8b5265b9ec49e215a86b7b79abea
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 64
|
py
|
ii = [('LeakWTI4.py', 1), ('MereHHB3.py', 1), ('MereHHB.py', 1)]
|
[
"varunwachaspati@gmail.com"
] |
varunwachaspati@gmail.com
|
756ad8f5d9df9d40b592c7ff7731514b9031114e
|
22b6eed87bc09bc93c78671b9afaae6c9ddd8b56
|
/ch09/treeExplore.py
|
3244543883bdee30332f1c679603e98f017e39de
|
[] |
no_license
|
zraul/MachineLearningInAction
|
934a4b8ef1ab6c0b44fa16d7c55e96648a0faf1b
|
bef85c70474c6514074fa56c7f9071cc71b90a83
|
refs/heads/master
| 2018-09-10T21:24:08.805026
| 2018-06-21T07:30:14
| 2018-06-21T07:30:14
| 109,653,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,373
|
py
|
# coding:utf-8
from numpy import *
from Tkinter import *
import regTrees
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
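# Note: following the "Machine Learning in Action" ch. 9 listing, shared GUI
# state (figure, canvas, data) is hung off the reDraw function as attributes.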
def reDraw(tolS, tolN):
reDraw.f.clf() # clear the figure
reDraw.a = reDraw.f.add_subplot(111)
if chkBtnVar.get():
if tolN < 2: tolN = 2
myTree = regTrees.createTree(reDraw.rawDat, regTrees.modelLeaf, \
regTrees.modelErr, (tolS, tolN))
yHat = regTrees.createForeCast(myTree, reDraw.testDat, \
regTrees.modelTreeEval)
else:
myTree = regTrees.createTree(reDraw.rawDat, ops=(tolS, tolN))
yHat = regTrees.createForeCast(myTree, reDraw.testDat)
reDraw.a.scatter(reDraw.rawDat[:, 0].A.tolist(), reDraw.rawDat[:, 1].A.tolist(), s=5) # use scatter for data set
reDraw.a.plot(reDraw.testDat, yHat, linewidth=2.0) # use plot for yHat
reDraw.canvas.draw()
def getInputs():
try:
tolN = int(tolNentry.get())
except:
tolN = 10
print "enter Integer for tolN"
tolNentry.delete(0, END)
tolNentry.insert(0, '10')
try:
tolS = float(tolSentry.get())
except:
tolS = 1.0
print "enter Float for tolS"
tolSentry.delete(0, END)
tolSentry.insert(0, '1.0')
return tolN, tolS
def drawNewTree():
tolN, tolS = getInputs() # get values from Entry boxes
reDraw(tolS, tolN)
root = Tk()
reDraw.f = Figure(figsize=(5, 4), dpi=100) # create canvas
reDraw.canvas = FigureCanvasTkAgg(reDraw.f, master=root)
reDraw.canvas.draw()
reDraw.canvas.get_tk_widget().grid(row=0, columnspan=3)
Label(root, text="tolN").grid(row=1, column=0)
tolNentry = Entry(root)
tolNentry.grid(row=1, column=1)
tolNentry.insert(0, '10')
Label(root, text="tolS").grid(row=2, column=0)
tolSentry = Entry(root)
tolSentry.grid(row=2, column=1)
tolSentry.insert(0, '1.0')
Button(root, text="ReDraw", command=drawNewTree).grid(row=1, column=2, rowspan=3)
chkBtnVar = IntVar()
chkBtn = Checkbutton(root, text="Model Tree", variable=chkBtnVar)
chkBtn.grid(row=3, column=0, columnspan=2)
reDraw.rawDat = mat(regTrees.loadDataSet('sine.txt'))
reDraw.testDat = arange(min(reDraw.rawDat[:, 0]), max(reDraw.rawDat[:, 0]), 0.01)
reDraw(1.0, 10)
root.mainloop()
|
[
"zhengraul@gmail.com"
] |
zhengraul@gmail.com
|
e9a7fd41bb6fcba5efc5ace3e0c3518e0d32db7b
|
c3dc3a6d8a4619eec1ae4ca35d5422bd3b1824ad
|
/app/core/management/commands/wait_for_db.py
|
2474ab7c29a5130e52760c152f33d2e763bd41a1
|
[] |
no_license
|
SadiqUltra/recipe-api
|
9aa196227f27f1f07df7e5c3cbd99297d6edd1cd
|
235f4f4dc671ce7739862925c28ef84730acb364
|
refs/heads/master
| 2023-02-17T00:46:27.858628
| 2021-01-08T21:01:07
| 2021-01-08T21:01:07
| 302,263,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available"""
def handle(self, *args, **options):
self.stdout.write('Waiting for database')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
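                # Note: fetching the connection handle is lazy, so this line
                # alone may not touch the database; a stricter readiness check
                # could force a query, e.g. via db_conn.cursor().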
except OperationalError:
self.stdout.write('Database unavailable waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
|
[
"sadikultra@gmail.com"
] |
sadikultra@gmail.com
|
fa288e2bda02e4f460a9e303967d5ad0abebc0cb
|
94b14f6498c1bbc11686414f36f93582d94060dd
|
/scripts/thesis-figs-jclim.py
|
e10b263a869fb2c3b181b844a3c36ee5ebaa16ab
|
[
"MIT"
] |
permissive
|
jenfly/monsoon-onset
|
fe1f72ae3e5faccce158f7ea87609f8e0ecdca31
|
6d8651a337daa174960e716d378292452db77246
|
refs/heads/master
| 2021-01-18T23:55:55.044244
| 2017-09-01T19:48:42
| 2017-09-01T19:48:42
| 43,531,517
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,097
|
py
|
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import xarray as xray
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import collections
import pandas as pd
import atmos as atm
import indices
import utils
# Format for article publication or presentation slides
pres = True
if pres:
figwidth = 12
style = atm.homedir() + 'dynamics/python/mpl-styles/presentation.mplstyle'
else:
figwidth = 7.48
style = atm.homedir() + 'dynamics/python/mpl-styles/grl_article.mplstyle'
plt.style.use(style)
fontsize = mpl.rcParams['font.size']
labelsize = fontsize + 3
dashes = [6, 2]
# ----------------------------------------------------------------------
version = 'merra2'
yearstr = '1980-2015'
datadir = atm.homedir() + 'datastore/%s/figure_data/' % version
pcp_nm = 'GPCP'
ind_nm = 'onset'
lon1, lon2 = 60, 100
lat1, lat2 = 10, 30
eqlat1, eqlat2 = -5, 5
plev_ubudget = 200
npre, npost = 120, 200
datafiles = {}
datafiles['ubudget'] = datadir + 'merra2_ubudget_1980-2014_excl.nc'
filestr = datadir + version + '_%s_' + yearstr + '.nc'
for nm in ['latp', 'hov', 'latlon', 'tseries', 'psi_comp', 'ebudget']:
datafiles[nm] = filestr % nm
datafiles['gpcp'] = datadir + 'gpcp_dailyrel_1997-2015.nc'
datafiles['index'] = filestr % 'index_CHP_MFC'
datafiles['mld'] = atm.homedir() + 'datastore/mld/ifremer_mld_DT02_c1m_reg2.0.nc'
mfcbudget_file = datadir + 'merra2_mfc_budget_1980-2015.nc'
nroll_mfcbudget = 5
# ----------------------------------------------------------------------
# Read data
data = {}
for nm in datafiles:
if nm == 'mld':
decode_times = False
else:
decode_times = True
print('Loading ' + datafiles[nm])
with xray.open_dataset(datafiles[nm], decode_times=decode_times) as ds:
data[nm] = ds.load()
tseries = data['tseries']
index = data['index']
index['length'] = index['retreat'] - index['onset']
data_hov = {nm : data['hov'][nm] for nm in data['hov'].data_vars}
data_hov['GPCP'] = data['gpcp']['PCP_SECTOR']
# Temporary fix for missing data in THETA_E_LML
var = data_hov['THETA_E_LML']
dmin, dmax = var['dayrel'].values.min(), var['dayrel'].values.max()
d1, d2 = 178, 183
var1 = var.sel(dayrel=range(dmin, d1))
var2 = var.sel(dayrel=range(d2, dmax + 1))
data_hov['THETA_E_LML'] = xray.concat((var1, var2), dim='dayrel')
# Surface moist static energy
Cp = atm.constants.Cp.values
Lv = atm.constants.Lv.values
data_hov['MSE_LML'] = (data_hov['TLML'] * Cp + data_hov['QLML'] * Lv) / 1e3
data_hov['MSE_LML'].name = 'MSE_LML'
data_hov['MSE_LML'].attrs['units'] = 'kJ kg^-1'
data_latp = data['latp']
data_latlon = {nm : data['latlon'][nm] for nm in data['latlon'].data_vars}
dlist = data['latlon']['dayrel'].values
data_latlon['GPCP'] = data['gpcp']['PCP'].sel(dayrel=dlist)
data_diff = {}
for nm in data_latlon:
data_diff[nm] = data_latlon[nm][1] - data_latlon[nm][0]
subset_dict = {'plev' : (plev_ubudget, plev_ubudget)}
ubudget = atm.subset(data['ubudget'], subset_dict, squeeze=True)
for nm in ['U', 'V']:
ubudget[nm] = atm.squeeze(data_latp[nm].sel(lev=plev_ubudget))
ubudget = ubudget.rename({'ADV_AVG' : 'ADV_MMC', 'COR_AVG' : 'COR_MMC',
'ADV_CRS' : 'CRS', 'PGF_ST' : 'PGF'})
ebudget = data['ebudget']
ebudget_eq = atm.dim_mean(ebudget, 'lat', eqlat1, eqlat2)
ebudget_sector = atm.dim_mean(ebudget, 'lon', lon1, lon2)
ebudget_eq_sector = atm.dim_mean(ebudget_eq, 'lon', lon1, lon2)
ps = data_latp['PS'] / 100
# MFC budget
with xray.open_dataset(mfcbudget_file) as mfc_budget:
mfc_budget.load()
mfc_budget = mfc_budget.rename({'DWDT' : 'dw/dt'})
mfc_budget['P-E'] = mfc_budget['PRECTOT'] - mfc_budget['EVAP']
for nm in mfc_budget.data_vars:
mfc_budget[nm] = atm.rolling_mean(mfc_budget[nm], nroll_mfcbudget, center=True)
# ----------------------------------------------------------------------
# Plotting functions and other utilities
def get_varnm(nm):
varnms = {'U200' : 'U', 'V200' : 'V', 'T200' : 'T', 'TLML' : 'T',
'QLML' : 'Q', 'THETA_E_LML' : 'THETA_E'}
return varnms.get(nm)
def get_colormap(nm):
if nm.startswith('PCP') or nm == 'GPCP':
cmap = 'hot_r'
else:
cmap = 'RdBu_r'
return cmap
def fix_axes(axlims):
plt.gca().set_ylim(axlims[:2])
plt.gca().set_xlim(axlims[2:])
plt.draw()
def add_labels(grp, labels, pos, fontsize, fontweight='bold'):
# Expand pos to list for each subplot, if needed
try:
n = len(pos[0])
except TypeError:
pos = [pos] * len(labels)
row, col = 0, 0
for i in range(len(labels)):
grp.subplot(row, col)
atm.text(labels[i], pos[i], fontsize=fontsize,
fontweight=fontweight)
col += 1
if col == grp.ncol:
col = 0
row += 1
def skip_ticklabel(xticks):
xtick_labels = []
for i, n in enumerate(xticks):
if i % 2 == 0:
xtick_labels = xtick_labels + ['']
else:
xtick_labels = xtick_labels + [n]
return xtick_labels
def plot_mfc_budget(mfc_budget, index, year, legend=True,
legend_kw={'fontsize' : 9, 'loc' : 'upper left',
'handlelength' : 2.5},
dashes=[6, 2], netprecip=False, labelpad=1.5):
ts = mfc_budget.sel(year=year)
ind = index.sel(year=year)
days = ts['day'].values
styles = {'PRECTOT' : {'color' : 'k', 'linestyle' : '--', 'dashes' : dashes},
'EVAP' : {'color' : 'k'},
'MFC' : {'color' : 'k', 'linewidth' : 2},
'dw/dt' : {'color' : '0.7', 'linewidth' : 2}}
if netprecip:
styles['P-E'] = {'color' : 'b', 'linewidth' : 2}
for nm in styles:
plt.plot(days, ts[nm], label=nm, **styles[nm])
plt.axvline(ind['onset'], color='k')
plt.axvline(ind['retreat'], color='k')
plt.xlabel('Day of Year')
plt.ylabel('mm day$^{-1}$', labelpad=labelpad)
ax1 = plt.gca()
ax2 = plt.twinx()
plt.sca(ax2)
plt.plot(days, ind['tseries'], 'r', alpha=0.6, linewidth=2, label='CMFC')
atm.fmt_axlabels('y', 'mm', color='r', alpha=0.6)
plt.gca().set_ylabel('mm', labelpad=labelpad)
if legend:
atm.legend_2ax(ax1, ax2, **legend_kw)
return ax1, ax2
def daily_tseries(tseries, index, pcp_nm, npre, npost, grp, keys1=None,
keys2=None, units1=None, units2=None, ylims=None,
legend_loc=None, ind_nm='onset', grid=False, dashes=[6, 2],
dlist=[15], labelpad=1.5, legend=True, xlabel=''):
"""Plot dailyrel timeseries climatology"""
xlims = (-npre, npost)
xticks = range(-npre, npost + 10, 30)
if ind_nm == 'onset':
x0 = [0, index['length'].mean(dim='year')]
xtick_labels = xticks
else:
x0 = [-index['length'].mean(dim='year'), 0]
xtick_labels = skip_ticklabel(xticks)
y2_opts={'color' : 'r', 'alpha' : 0.6}
dashed = {'color' : 'k', 'linestyle' : '--', 'dashes' : dashes}
styles = ['k', dashed, 'g', 'm']
legend_kw = {}
legend_kw['loc'] = legend_loc
y1_label = units1
y2_label = units2
data1 = tseries[keys1]
if keys2 is not None:
data2 = tseries[keys2]
else:
data2 = None
data1_styles = {nm : style for (nm, style) in zip(keys1, styles)}
axs = utils.plotyy(data1, data2, xname='dayrel', data1_styles=data1_styles,
y2_opts=y2_opts, xlims=xlims, xticks=xticks, ylims=ylims,
xlabel=xlabel, y1_label=y1_label, y2_label=y2_label,
legend=legend, legend_kw=legend_kw, x0_axvlines=x0,
grid=grid)
for ax, label in zip(axs, [y1_label, y2_label]):
ax.set_ylabel(label, labelpad=labelpad)
plt.gca().set_xticklabels(xtick_labels)
if dlist is not None:
for d0 in dlist:
plt.axvline(d0, color='k', linestyle='--', dashes=dashes)
def latpres(data_latp, day, ps, xlims=(-60, 60), xticks=range(-60, 61, 15),
title=None, clev_u=5, clev_psi=5, u_clr='#EE82EE', u_kw={},
            psi_kw={}, title_fontsize=14):
"""Plot lat-pres contours of streamfunction and zonal wind.
"""
xmin, xmax = xlims
axlims = (xmin, xmax, 0, 1000)
latp_data = atm.subset(data_latp, {'dayrel' : (day, day)}, squeeze=True)
u = latp_data['U']
psi = latp_data['PSI']
atm.contour_latpres(u, clev=clev_u, topo=ps, colors=u_clr,
contour_kw=u_kw, axlims=axlims)
atm.contour_latpres(psi, clev=clev_psi, omitzero=True, axlims=axlims,
contour_kw=psi_kw)
plt.xticks(xticks, xticks)
#plt.grid()
if title is not None:
        plt.title(title, fontsize=title_fontsize)
def get_latmax(var):
# Temporary - take subset to avoid wonky data at end of timeseries
var = atm.subset(var.copy(), {'dayrel' : (-120, 170)})
# ------------------------------------------
lat = atm.get_coord(var, 'lat')
coords={'dayrel': var['dayrel']}
latdim = atm.get_coord(var, 'lat', 'dim')
latmax = lat[np.nanargmax(var, axis=latdim)]
latmax = xray.DataArray(latmax, dims=['dayrel'], coords=coords)
return latmax
def annotate_latmax(var, ax=None, nroll=None, annotate=True):
latmax = get_latmax(var)
days = atm.get_coord(latmax, 'dayrel')
if ax is None:
ax = plt.gca()
if nroll is not None:
latmax = atm.rolling_mean(latmax, nroll, center=True)
latmax_0 = latmax.sel(dayrel=0)
ax.plot(days, latmax, 'k', linewidth=2, label='Latitude of Max')
if annotate:
ax.legend(loc='lower right', fontsize=10)
s = atm.latlon_labels(latmax_0, latlon='lat', fmt='%.1f')
ax.annotate(s, xy=(0, latmax_0), xycoords='data',
xytext=(-40, 20), textcoords='offset points',
arrowprops=dict(arrowstyle="->"))
return latmax
def contourf_latday(var, clev=None, title='', cticks=None, climits=None,
nc_pref=40, grp=None,
xlims=(-120, 200), xticks=np.arange(-120, 201, 30),
ylims=(-60, 60), yticks=np.arange(-60, 61, 20),
dlist=None, grid=False, ind_nm='onset'):
var = atm.subset(var, {'lat' : ylims})
vals = var.values.T
lat = atm.get_coord(var, 'lat')
days = atm.get_coord(var, 'dayrel')
cmap = get_colormap(var.name)
if var.min() < 0:
symmetric = True
else:
symmetric = False
if var.name.startswith('PCP'):
extend = 'max'
else:
extend = 'both'
    if clev is None:
cint = atm.cinterval(vals, n_pref=nc_pref, symmetric=symmetric)
clev = atm.clevels(vals, cint, symmetric=symmetric)
elif len(atm.makelist(clev)) == 1:
if var.name == 'PREC':
clev = np.arange(0, 10 + clev/2.0, clev)
else:
clev = atm.clevels(vals, clev, symmetric=symmetric)
plt.contourf(days, lat, vals, clev, cmap=cmap, extend=extend)
plt.colorbar(ticks=cticks)
plt.clim(climits)
atm.ax_lims_ticks(xlims, xticks, ylims, yticks)
plt.grid(grid)
plt.title(title)
if dlist is not None:
for d0 in dlist:
plt.axvline(d0, color='k')
    plt.xlabel('Days Since ' + ind_nm.capitalize())
    plt.ylabel('Latitude')
def latlon_and_sector(var, vardiff, lon1, lon2, grp, clim=None,
clim_diff=None, axlims=(-60, 60, 40, 120),
dashes=[6, 2], xticks=range(40, 121, 20),
lg_fontsize=12, lg_loc='upper left'):
subset_dict = {'lat' : (axlims[0], axlims[1]),
'lon' : (axlims[2], axlims[3])}
xtick_labels = atm.latlon_labels(xticks, 'lon')
for i in range(1, len(xtick_labels), 2):
xtick_labels[i] = ''
var = atm.subset(var, subset_dict)
vardiff = atm.subset(vardiff, subset_dict)
varbar = xray.Dataset()
daynm = 'D%.0f'
for day in var.dayrel:
dnm = daynm % day
varbar[dnm] = atm.dim_mean(var.sel(dayrel=day), 'lon', lon1, lon2)
varbar['DIFF'] = atm.dim_mean(vardiff, 'lon', lon1, lon2)
cmap = get_colormap(var.name)
cmap_diff = 'RdBu_r'
# Day 0
grp.next()
atm.pcolor_latlon(var[0], cmap=cmap, axlims=axlims)
plt.clim(clim)
if grp.row == 0:
plt.title(daynm % var['dayrel'].values[0])
ylimits = axlims[:2]
plt.ylim(ylimits)
plt.xticks(xticks, xtick_labels)
plt.ylabel(var.name)
# Day 0-15 difference
grp.next()
atm.pcolor_latlon(vardiff, cmap=cmap_diff, axlims=axlims)
if clim_diff is None:
vmax = np.nanmax(abs(vardiff))
clim_diff = (-vmax, vmax)
plt.clim(clim_diff)
if grp.row == 0:
plt.title('DIFF')
plt.ylim(ylimits)
plt.xticks(xticks, xtick_labels)
plt.gca().set_yticklabels([])
# Sector mean line plot
grp.next()
latnm = atm.get_coord(varbar, 'lat', 'name')
xticks = np.arange(axlims[0], axlims[1] + 1, 20)
xlims = axlims[:2]
legend_kw = {'handlelength': 2, 'fontsize': lg_fontsize, 'loc' : lg_loc}
dashed = {'color' : 'k', 'linestyle' : '--', 'dashes' : dashes}
styles = ['k', dashed]
keys = varbar.data_vars.keys()[:2]
data1 = varbar[keys]
data1_styles = {nm : style for nm, style in zip(keys, styles)}
if grp.row == grp.nrow - 1:
xlabel = 'Latitude'
else:
xlabel = ''
if grp.row == 0:
plt.title('SASM Sector Mean')
utils.plotyy(data1, data2=varbar['DIFF'], xname=latnm,
data1_styles=data1_styles,
xlims=xlims, xticks=xticks, ylims=None, yticks=None,
y2_lims=None, xlabel=xlabel, y1_label='', y2_label='',
legend=True, legend_kw=legend_kw, grid=False)
def ubudget_lineplot(ubudget_sector, keys, day, style, xlims=(-60, 60),
xticks=range(-60, 61, 15), ylims=None, ylabel=None, legend=True,
legend_kw={'fontsize' : 8, 'loc' : 'lower center', 'ncol' : 2,
'handlelength' : 2.5}):
"""Plot ubudget terms and winds vs latitude."""
subset_dict = {'dayrel' : (day, day), 'lat': xlims}
data = atm.subset(ubudget_sector[keys], subset_dict, squeeze=True)
data = data.to_dataframe()
data.plot(ax=plt.gca(), style=style, legend=False)
plt.xlim(xlims)
plt.ylim(ylims)
plt.xticks(xticks, xticks)
plt.gca().set_xticks(xticks, minor=True)
plt.xlabel('Latitude')
#plt.grid()
if legend:
plt.legend(**legend_kw)
if ylabel is not None:
plt.ylabel(ylabel)
def psi_decomposition(psi, ps, cint=10, xlims=(-60, 60),
xticks=range(-60, 61, 15), title='', u=None,
                      u_clr='#EE82EE', title_fontsize=14):
xmin, xmax = xlims
axlims = (xmin, xmax, 0, 1000)
if u is not None:
atm.contour_latpres(u, clev=[0], omitzero=False, colors=u_clr,
axlims=axlims)
atm.contour_latpres(psi, clev=cint, topo=ps, omitzero=True, axlims=axlims)
plt.xticks(xticks, xticks)
#plt.grid()
    plt.title(title, fontsize=title_fontsize)
# ======================================================================
# FIGURES
# ======================================================================
# ----------------------------------------------------------------------
# MFC budget and tseries fits for CHP onset/retreat indices
plotyear = 2000
figsize = (0.6 * figwidth, 0.4 * figwidth)
ind = index.sel(year=plotyear)
mfc = ind['daily_ts']
cmfc = ind['tseries']
fit_onset = ind['tseries_fit_onset']
fit_retreat = ind['tseries_fit_retreat']
days = ind['day']
plt.figure(figsize=figsize)
plt.plot(days, mfc, 'k', linewidth=2)
plt.xlabel('Day of Year')
plt.ylabel('mm day$^{-1}$')
plt.figure(figsize=figsize)
plt.plot(days, cmfc, 'r', linewidth=2)
plt.xlabel('Day of Year')
plt.ylabel('mm')
ts_list = [fit_onset, fit_retreat]
ind_list = [ind['onset'], ind['retreat']]
for ts, d0, color in zip(ts_list, ind_list, ['b', 'b']):
    plt.figure(figsize=figsize)
    plt.plot(days, cmfc, 'r', linewidth=2)
    plt.plot(days, ts, color, linewidth=2)
    plt.axvline(d0, color=color)
    plt.xlabel('Day of Year')
    plt.ylabel('mm')
atm.savefigs('figs/tsfit', 'png', dpi=200)
print('Done!')
legend_kw = {'loc' : 'upper left', 'framealpha' : 0.0}
plt.figure(figsize=figsize)
plot_mfc_budget(mfc_budget, index, plotyear, dashes=dashes, legend=True,
legend_kw=legend_kw)
# ----------------------------------------------------------------------
# Daily tseries
nrow, ncol = 2, 2
fig_kw = {'figsize' : (figwidth, 0.7 * figwidth)}
gridspec_kw = {'left' : 0.07, 'right' : 0.9, 'bottom' : 0.07, 'top' : 0.94,
'wspace' : 0.5, 'hspace' : 0.39}
legend = True
legend_kw = {'loc' : 'upper left', 'framealpha' : 0.0}
legend = True
dlist = [15]
opts = []
opts.append({'keys1' : ['MFC', pcp_nm], 'keys2' : ['CMFC'],
'units1' : 'mm day$^{-1}$', 'units2' : 'mm',
'ylims' : (-3.5, 9), 'legend_loc' : 'upper left' })
opts.append({'keys1' : ['U850_15N'], 'keys2' : ['V850_15N'],
'units1' : ' m s$^{-1}$', 'units2' : ' m s$^{-1}$',
'ylims' : (-8, 15), 'legend_loc' : 'upper left' })
opts.append({'keys1' : ['T200_30N'], 'keys2' : ['T200_30S'],
'units1' : ' K', 'units2' : ' K',
'ylims' : (218, 227), 'legend_loc' : 'upper left' })
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw)
for opt in opts:
grp.next()
xlabel = 'Days Since Onset'
daily_tseries(tseries, index, pcp_nm, npre, npost, grp, legend=legend,
ind_nm=ind_nm, dlist=dlist, xlabel=xlabel, **opt)
# ----------------------------------------------------------------------
# Lat-pres contour plots of streamfunction, U
nrow, ncol = 2, 3
advance_by = 'row'
fig_kw = {'figsize' : (figwidth, 0.7*figwidth), 'sharex' : 'col', 'sharey' : 'row'}
gridspec_kw = {'left' : 0.1, 'right' : 0.96, 'wspace' : 0.06, 'hspace' : 0.2,
'bottom' : 0.08, 'top' : 0.95}
plotdays = [-45, -30, -15, 0, 15, 30]
xlims, xticks = (-35, 35), range(-30, 31, 10)
grp = atm.FigGroup(nrow, ncol,fig_kw=fig_kw, gridspec_kw=gridspec_kw)
for day in plotdays:
grp.next()
title = 'Day %d' % day
latpres(data_latp, day, ps=ps, xlims=xlims, xticks=xticks)
    plt.title(title, fontsize=14)
if grp.row < grp.nrow - 1:
plt.xlabel('')
if grp.col > 0:
plt.ylabel('')
# ----------------------------------------------------------------------
# Hovmoller plots (lat-day)
xticks = range(-npre, npost + 10, 30)
if ind_nm == 'onset':
dlist = [0, index['length'].mean(dim='year')]
d0 = 15
xtick_labels = xticks
else:
dlist = [-index['length'].mean(dim='year'), 0]
d0 = None
xtick_labels = skip_ticklabel(xticks)
keys = [pcp_nm, 'PSI500', 'U850', 'U200', 'U200', 'T200', pcp_nm,
'THETA_E_LML']
nms_dict = {'PSI500' : '$\psi$500', 'THETA_E_LML' : r'${\theta}_{eb}$'}
clevs = {pcp_nm : 1, 'U200' : 5, 'V200' : 1, 'PSI500' : 5, 'T200' : 0.5,
'THETA_E_LML' : 2.5, 'TLML' : 1, 'QLML' : 5e-4, 'U850' : 1,
'MSE_LML' : 2}
cticks_dict = {pcp_nm : np.arange(0, 13, 2),
'T200' : np.arange(208, 229, 4),
'U200' : np.arange(-80, 81, 20),
'U850' : np.arange(-15, 16, 5),
'PSI500' : np.arange(-80, 81, 20),
'THETA_E_LML' : np.arange(240, 361, 20),
'MSE_LML' : np.arange(240, 361, 20)}
clim_dict = {pcp_nm : (0, 10), 'U200' : (-50, 50),
'PSI500' : (-80, 80), 'T200' : (208, 227),
'THETA_E_LML' : (260, 350), 'U850' : (-18, 18),
'MSE_LML' : (245, 350)}
plot_latmax = False
nrow, ncol = 2, 2
fig_kw = {'figsize' : (figwidth, 0.64 * figwidth)}
gridspec_kw = {'left' : 0.07, 'right' : 0.99, 'bottom' : 0.07, 'top' : 0.94,
               'wspace' : 0.2, 'hspace' : 0.4}
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw)
for key in keys:
grp.next()
var = data_hov[key]
clev = clevs.get(key)
cticks = cticks_dict.get(key)
climits = clim_dict.get(key)
if key in nms_dict:
title = nms_dict[key]
else:
title = key.upper()
print(key, clev, climits, cticks)
contourf_latday(var, clev=clev, cticks=cticks, climits=climits,
title=title, grp=grp,
dlist=dlist, ind_nm=ind_nm)
if d0 is not None:
plt.axvline(d0, color='k', linestyle='--', dashes=dashes)
if plot_latmax and key.startswith('THETA_E'):
latmax = annotate_latmax(var, nroll=None)
plt.xticks(xticks, xtick_labels)
plt.xlim(-npre, npost)
# ----------------------------------------------------------------------
# D0--D15 Lat-lon and sector line plots
nms_list = [['U200', 'T200'], ['THETA_E_LML', 'TLML']]
clim_dict = {'GPCP' : (0, 12), 'U200' : (-50, 50), 'T200' : (213, 227),
'TLML' : (260, 315), 'QLML' : (0, 0.022),
'THETA_E_LML' : (270, 360)}
lg_loc = {'U200' : 'lower left', 'T200' : 'upper left', 'TLML' : 'upper left',
'THETA_E_LML' : 'upper left'}
ncol = 3
gridspec_kw = {'left' : 0.12, 'right' : 0.9, 'bottom' : 0.09, 'top' : 0.93,
'wspace' : 0.45, 'hspace' : 0.15, 'width_ratios' : [1, 1, 1.5]}
for nms in nms_list:
nrow = len(nms)
if nrow < 3:
height = 0.55 * figwidth
else:
height = 0.8 * figwidth
fig_kw = {'figsize' : (figwidth, height), 'sharex' : 'col'}
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw)
for nm in nms:
latlon_and_sector(data_latlon[nm], data_diff[nm], lon1, lon2, grp,
clim=clim_dict[nm], clim_diff=None, dashes=dashes,
lg_loc=lg_loc[nm])
# ----------------------------------------------------------------------
# Ubudget components at 200 hPa
style = {'ADV_MMC' : 'b', 'COR_MMC' : 'b--', 'ADV+COR' : 'r', 'DMDY' : 'r',
'PGF' : 'k', 'CRS' : 'g', 'ADV_AVST' : 'g--',
'ADV_STAV' : 'g-.', 'EMFC' : 'm', 'EMFC_TR' : 'm--', 'EMFC_ST' : 'm-.',
'SUM' : 'k--', 'ACCEL' : 'c', 'ANA' : 'y', 'U' : 'k', 'V' : 'k--'}
keys_dict = collections.OrderedDict()
keys_dict['ubudget'] = ['ADV_MMC', 'COR_MMC', 'DMDY', 'PGF',
'CRS', 'EMFC']
keys_dict['winds'] = ['U']
keys_dict['eddies'] = ['EMFC_TR', 'EMFC_ST', 'EMFC', 'ADV_CRS']
ylabels = {}
units = '$10^{-4}$ m s$^{-2}$'
ylabels['ubudget'] = units
ylabels['eddies'] = ylabels['ubudget']
ylabels['winds'] = 'm s$^{-1}$'
ylims = {'ubudget' : (-8, 8), 'winds' : (-20, 50)}
plotdays = [-30, 0, 30]
nrow, ncol = 2, 3
advance_by = 'row'
fig_kw = {'figsize' : (figwidth, 0.5 * figwidth),
'sharex' : 'col', 'sharey' : 'row'}
gridspec_kw = {'left' : 0.08, 'right' : 0.99, 'wspace' : 0.09, 'hspace' : 0.1,
'bottom' : 0.09, 'top' : 0.92, 'height_ratios' : [0.5, 1]}
legend_kw={'fontsize' : 8, 'loc' : 'upper center', 'ncol' : 2,
'handlelength' : 2.5}
xlims, xticks = (-60, 60), range(-60, 61, 15)
grp = atm.FigGroup(nrow, ncol, advance_by, fig_kw=fig_kw,
gridspec_kw=gridspec_kw)
for day in plotdays:
for nm in ['winds', 'ubudget']:
grp.next()
if grp.row == 0:
plt.title('Day %d' % day)
if grp.col == 0:
legend = True
else:
legend = False
keys = keys_dict[nm]
ubudget_lineplot(ubudget, keys, day, style, xlims=xlims,
xticks=xticks, ylims=ylims[nm],
legend=legend, legend_kw=legend_kw,
ylabel=ylabels[nm])
if nm == 'winds':
plt.axhline(0, color='0.7', linestyle='--', dashes=[6, 1])
if grp.row == grp.nrow - 1:
plt.xlabel('Latitude')
# ----------------------------------------------------------------------
# Streamfunction decomposition
plotdays = [-30, 0, 30]
#plotdays = [-15, 0, 15]
#keys = ['TOT', 'MMC', 'EDDY', 'PGF', 'RESID']
keys = ['TOT', 'MMC', 'EDDY', 'PGF']
xlims, xticks = (-35, 35), range(-30, 31, 10)
cint = 5
nrow, ncol = len(keys), len(plotdays)
advance_by = 'col'
fig_kw = {'figsize' : (figwidth, 0.7 * figwidth), 'sharex' : True,
'sharey' : True}
gridspec_kw = {'left' : 0.08, 'right' : 0.99, 'wspace' : 0.06, 'hspace' : 0.11,
'bottom' : 0.08, 'top' : 0.95}
#suptitle = '%d-%dE $\psi$ components' % (lon1, lon2)
suptitle = ''
grp = atm.FigGroup(nrow, ncol, advance_by, fig_kw=fig_kw,
gridspec_kw=gridspec_kw, suptitle=suptitle)
for key in keys:
for day in plotdays:
grp.next()
if grp.row == 0:
title = 'Day %d' % day
u = data_latp['U'].sel(dayrel=day)
else:
title = ''
u = None
if key == 'TOT':
psi = data_latp['PSI'].sel(dayrel=day)
else:
psi = data['psi_comp'][key].sel(dayrel=day)
psi_decomposition(psi, ps, cint, xlims, xticks, title=title,
u=u)
if grp.col > 0:
plt.ylabel('')
if grp.row < grp.nrow - 1:
plt.xlabel('')
atm.text(key, (0.05, 0.88))
# ----------------------------------------------------------------------
# Energy budget - contour plots
def contour_londay(var, clev=None, grp=None,n_pref=40,
yticks=np.arange(-120, 201, 30)):
lon = atm.get_coord(var, 'lon')
days = atm.get_coord(var, 'dayrel')
if clev is None:
cint = atm.cinterval(var, n_pref=n_pref, symmetric=True)
clev = atm.clevels(var, cint, symmetric=True)
plt.contourf(lon, days, var, clev, cmap='RdBu_r', extend='both')
plt.colorbar()
plt.yticks(yticks)
plt.axhline(0, color='0.5', linestyle='--', dashes=[6, 1])
if grp is not None and grp.row == grp.nrow - 1:
plt.xlabel('Longitude')
if grp is not None and grp.col == 0:
plt.ylabel('Days Since Onset')
mse_vars = {'VMSE' : 'VH', 'VCPT' : 'VFLXCPT', 'VPHI' : 'VFLXPHI',
'VLQV' : 'VFLXLQV'}
scale = 1e9
vmse_eq = xray.Dataset({nm : ebudget_eq[mse_vars[nm]] for nm in mse_vars})
vmse_eq = vmse_eq / scale
nrow, ncol = 2, 2
fig_kw = {'figsize' : (figwidth, 0.7 * figwidth), 'sharex' : True,
'sharey' : True}
gridspec_kw = {'left' : 0.1, 'right' : 0.99, 'bottom' : 0.07, 'top' : 0.9,
'wspace' : 0.05}
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw)
lonrange = (40, 120)
for nm in ['VMSE', 'VCPT', 'VPHI', 'VLQV']:
grp.next()
var = atm.subset(vmse_eq[nm], {'lon' : lonrange})
contour_londay(var, grp=grp)
    plt.title(nm, fontsize=14)
plt.gca().invert_yaxis()
labels = ['a', 'b', 'c', 'd']
x1, x2, y0 = -0.15, -0.05, 1.05
pos = [(x1, y0), (x2, y0), (x1, y0), (x2, y0)]
add_labels(grp, labels, pos, labelsize)
# ----------------------------------------------------------------------
# Energy budget - sector means
# vmse_sector = xray.Dataset()
# for nm in ['VH', 'VFLXCPT', 'VFLXPHI', 'VFLXLQV']:
# key = nm.replace('VFLX', 'V').replace('VH', 'VMSE')
# vmse_sector[key] = ebudget_sector[nm] / scale
# Cross-equatorial fluxes integrated over sectors
a = atm.constants.radius_earth.values
eq_int = xray.Dataset()
lonranges = [(40, 60), (40, 100), (lon1, lon2)]
eq_int.attrs['lonranges'] = ['%d-%dE' % lonrange for lonrange in lonranges]
for lonrange in lonranges:
lon1, lon2 = lonrange
dist = a * np.radians(lon2 - lon1)
for nm in vmse_eq.data_vars:
key = nm + '_%d-%dE' % (lon1, lon2)
eq_int[key] = atm.dim_mean(vmse_eq[nm], 'lon', lon1, lon2) * dist
# Convert to PW
eq_int = eq_int / 1e6
days = atm.get_coord(eq_int, 'dayrel')
nms = ['VMSE', 'VCPT', 'VPHI', 'VLQV']
nms_dict = {'VMSE' : r'$vh$', 'VCPT' : r'$vC_pT$', 'VPHI' : r'$vgz$', 'VLQV' : r'$vL_vq_v$'}
colors = {'40-60E' : 'r', '60-100E' : 'b'}
styles = {'VMSE' : {'linewidth' : 2}, 'VPHI' : {'linestyle' : 'dotted'},
'VCPT' : {'linestyle' : 'dashed', 'dashes' : dashes},
'VLQV' : {'linestyle' : 'solid'}}
#lonranges = ['40-60E', '60-100E']
lonranges = ['60-100E']
#lonranges = eq_int.attrs['lonranges']
plt.figure(figsize=(0.7*figwidth, 0.45 * figwidth))
for lonrange in lonranges:
for nm in nms:
style = styles[nm]
style['color'] = colors[lonrange]
key = nm + '_' + lonrange
        plt.plot(days, eq_int[key], label=nms_dict[nm], **style)
plt.legend(loc='lower left', ncol=1, handlelength=3, fontsize=14)
#plt.grid()
plt.xticks(np.arange(-120, 211, 30))
plt.xlim(-120, 210)
plt.axvline(0, color='0.5')
plt.xlabel('Days Since Onset')
plt.ylabel('Flux (PW)')
plt.title('Cross-Equatorial MSE Fluxes')
# nrow, ncol = 1, 2
# fig_kw = {'figsize' : (figwidth, 0.4 * figwidth), 'sharex' : True}
# gridspec_kw = {'left' : 0.07, 'right' : 0.96, 'bottom' : 0.15, 'top' : 0.9,
# 'wspace' : 0.15}
# #suptitle = 'Sector Cross-Eq <V*MSE> (%s)' % eq_int.attrs['units']
# suptitle = ''
# grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw,
# suptitle=suptitle)
#
# for lonrange in lonranges:
# grp.next()
# plt.title(lonrange, fontsize=11)
# for nm in nms:
# key = nm + '_' + lonrange
# plt.plot(days, eq_int[key], label=nm, **styles[nm])
# plt.legend(fontsize=9, loc=locs[lonrange], handlelength=3)
# #plt.grid()
# plt.xticks(np.arange(-120, 201, 30))
# plt.axvline(0, color='0.5')
# if grp.row == grp.nrow - 1:
# plt.xlabel('Days Since Onset')
# if grp.col == 0:
# plt.ylabel('<V*MSE> (PW)')
|
[
"jenfly@gmail.com"
] |
jenfly@gmail.com
|
f496cd962f255e71d69c6fa7b66ae0634ff97d06
|
2031771d8c226806a0b35c3579af990dd0747e64
|
/pyobjc-framework-GameplayKit/PyObjCTest/test_gknoise.py
|
d8d8bf40f182311383d8ad2cf7e5e9c0dbc42166
|
[
"MIT"
] |
permissive
|
GreatFruitOmsk/pyobjc-mirror
|
a146b5363a5e39181f09761087fd854127c07c86
|
4f4cf0e4416ea67240633077e5665f5ed9724140
|
refs/heads/master
| 2018-12-22T12:38:52.382389
| 2018-11-12T09:54:18
| 2018-11-12T09:54:18
| 109,211,701
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
from PyObjCTools.TestSupport import *
import sys
if sys.maxsize >= 2 ** 32:
import GameplayKit
class TestGKNoise (TestCase):
def testMethods(self):
self.assertArgIsBOOL(GameplayKit.GKNoise.remapValuesToTerracesWithPeaks_terracesInverted_, 1)
if __name__ == "__main__":
main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
c18f4b11de5933b4556a38ebe129b9bf836b1c3e
|
77c641fd0708b279dddbe01f6af32a8531b93185
|
/marketsim/gen/_out/_test/overloading/_hh.py
|
5783a61189e232f2e4fe892be8e84c6c05aa2234
|
[] |
no_license
|
abensrhir/marketsimulator
|
aea286afd2bb2e0c8a547bfa879601aef21c0cd5
|
f9f55c72fb34cdbec42b96737ca20839f26c6299
|
refs/heads/master
| 2020-12-13T20:55:55.795344
| 2014-02-24T22:52:24
| 2014-02-24T22:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,220
|
py
|
from marketsim import registry
from marketsim.gen._out._ifunction import IFunctionfloat
from marketsim import context
@registry.expose(["internal tests", "hh"])
class hh_(IFunctionfloat):
"""
"""
def __init__(self):
from marketsim import rtti
rtti.check_fields(self)
self.impl = self.getImpl()
@property
def label(self):
return repr(self)
_properties = {
}
def __repr__(self):
return "hh" % self.__dict__
def bind(self, ctx):
self._ctx = ctx.clone()
_internals = ['impl']
def __call__(self, *args, **kwargs):
return self.impl()
def reset(self):
self.impl = self.getImpl()
ctx = getattr(self, '_ctx', None)
if ctx: context.bind(self.impl, ctx)
def getImpl(self):
from marketsim.gen._out._test.overloading._f import f_Float as __test_overloading_f_Float
from marketsim.gen._out._constant import constant_Float as _constant_Float
return __test_overloading_f_Float(_constant_Float(12.2))
def hh():
from marketsim import rtti
return hh_()
    raise Exception('Cannot find suitable overload for hh(' + ')')
|
[
"anton.kolotaev@gmail.com"
] |
anton.kolotaev@gmail.com
|
862a90508c044108bbf00e519d7eccb9a8c34822
|
7e0e991dc44ed1bd4627821cc1f26d72e2f2caf6
|
/node_modules/utf-8-validate/build/config.gypi
|
b127854b91068e846adb41d14a9af2b6baa839ad
|
[
"MIT"
] |
permissive
|
odino/react-native-codemotion
|
14f50e09c3dc20a9437d180dce7f506b5b7c7c64
|
4ae74296f178d6620a67b1b685b08199c4a009c8
|
refs/heads/master
| 2020-12-25T21:55:44.140336
| 2016-03-17T08:18:19
| 2016-03-17T08:19:10
| 53,743,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,571
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"gas_version": "2.24",
"host_arch": "x64",
"icu_small": "false",
"node_byteorder": "little",
"node_install_npm": "true",
"node_prefix": "/home/odino/local/node",
"node_release_urlbase": "",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"nodedir": "/home/odino/.node-gyp/5.0.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/3.3.6 node/v5.0.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "1000",
"force": "",
"only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"userconfig": "/home/odino/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"progress": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/usr/bin/zsh",
"dry_run": "",
"prefix": "/home/odino/local/node",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/home/odino/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "true",
"access": "",
"also": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "5.0.0",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/home/odino/local/node/etc/npmrc",
"init_module": "/home/odino/.npm-init.js",
"parseable": "",
"globalignorefile": "/home/odino/local/node/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "alessandro.nadalin@gmail.com",
"json": ""
}
}
|
[
"alessandro.nadalin@gmail.com"
] |
alessandro.nadalin@gmail.com
|
a3e8c7a7db85606208894b677b93edc8a3676bfb
|
66236f42fa50c20bb5e302259d57af06027803a1
|
/fun_util.py
|
10b4214521999d974ab0533579428c44558a3249
|
[] |
no_license
|
sauravkaushik8/Sign-Language
|
5eb8fe90c92155c8b5c35a9f09b131c03c2e1e94
|
438b75a2347cdfd1dff6f67ad438611b180841b9
|
refs/heads/master
| 2021-04-06T04:21:30.681822
| 2018-03-08T05:49:29
| 2018-03-08T05:49:29
| 124,418,935
| 0
| 1
| null | 2018-03-08T16:33:08
| 2018-03-08T16:33:08
| null |
UTF-8
|
Python
| false
| false
| 10,132
|
py
|
import cv2, pickle
import numpy as np
import tensorflow as tf
from cnn_tf import cnn_model_fn
import os
import sqlite3, pyttsx3
from keras.models import load_model
from threading import Thread
engine = pyttsx3.init()
engine.setProperty('rate', 150)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
model = load_model('cnn_model_keras2.h5')
def get_hand_hist():
with open("hist", "rb") as f:
hist = pickle.load(f)
return hist
def get_image_size():
img = cv2.imread('gestures/0/100.jpg', 0)
return img.shape
image_x, image_y = get_image_size()
def keras_process_image(img):
img = cv2.resize(img, (image_x, image_y))
img = np.array(img, dtype=np.float32)
img = np.reshape(img, (1, image_x, image_y, 1))
return img
def keras_predict(model, image):
processed = keras_process_image(image)
pred_probab = model.predict(processed)[0]
pred_class = list(pred_probab).index(max(pred_probab))
return max(pred_probab), pred_class
def get_pred_text_from_db(pred_class):
conn = sqlite3.connect("gesture_db.db")
cmd = "SELECT g_name FROM gesture WHERE g_id="+str(pred_class)
cursor = conn.execute(cmd)
for row in cursor:
return row[0]
def get_pred_from_contour(contour, thresh):
x1, y1, w1, h1 = cv2.boundingRect(contour)
save_img = thresh[y1:y1+h1, x1:x1+w1]
text = ""
    if w1 > h1:
        save_img = cv2.copyMakeBorder(save_img, int((w1-h1)/2), int((w1-h1)/2), 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0))
    elif h1 > w1:
        save_img = cv2.copyMakeBorder(save_img, 0, 0, int((h1-w1)/2), int((h1-w1)/2), cv2.BORDER_CONSTANT, value=(0, 0, 0))
pred_probab, pred_class = keras_predict(model, save_img)
if pred_probab*100 > 70:
text = get_pred_text_from_db(pred_class)
return text
def get_operator(pred_text):
try:
pred_text = int(pred_text)
except:
return ""
operator = ""
if pred_text == 1:
operator = "+"
elif pred_text == 2:
operator = "-"
elif pred_text == 3:
operator = "*"
elif pred_text == 4:
operator = "/"
elif pred_text == 5:
operator = "%"
elif pred_text == 6:
operator = "**"
elif pred_text == 7:
operator = ">>"
elif pred_text == 8:
operator = "<<"
elif pred_text == 9:
operator = "&"
elif pred_text == 0:
operator = "|"
return operator
hist = get_hand_hist()
x, y, w, h = 300, 100, 300, 300
is_voice_on = True
def get_img_contour_thresh(img):
    img = cv2.flip(img, 1)
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Back-project the stored hand histogram to highlight skin-colored pixels
    dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    cv2.filter2D(dst, -1, disc, dst)
    # Smooth and binarize (Otsu) to get a clean hand mask
    blur = cv2.GaussianBlur(dst, (11, 11), 0)
    blur = cv2.medianBlur(blur, 15)
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.merge((thresh, thresh, thresh))
    thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
    # Crop to the capture ROI and extract contours of the hand
    thresh = thresh[y:y+h, x:x+w]
    contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]
    return img, contours, thresh
def say_text(text):
if not is_voice_on:
return
while engine._inLoop:
pass
engine.say(text)
engine.runAndWait()
def calculator_mode(cam):
global is_voice_on
flag = {"first": False, "operator": False, "second": False, "clear": False}
count_same_frames = 0
first, operator, second = "", "", ""
pred_text = ""
calc_text = ""
info = "Enter first number"
Thread(target=say_text, args=(info,)).start()
count_clear_frames = 0
while True:
img = cam.read()[1]
img, contours, thresh = get_img_contour_thresh(img)
old_pred_text = pred_text
if len(contours) > 0:
contour = max(contours, key = cv2.contourArea)
if cv2.contourArea(contour) > 10000:
pred_text = get_pred_from_contour(contour, thresh)
if old_pred_text == pred_text:
count_same_frames += 1
else:
count_same_frames = 0
if pred_text == "C":
if count_same_frames > 5:
count_same_frames = 0
first, second, operator, pred_text, calc_text = '', '', '', '', ''
flag['first'], flag['operator'], flag['second'], flag['clear'] = False, False, False, False
info = "Enter first number"
Thread(target=say_text, args=(info,)).start()
elif pred_text == "Best of Luck " and count_same_frames > 15:
count_same_frames = 0
if flag['clear']:
first, second, operator, pred_text, calc_text = '', '', '', '', ''
flag['first'], flag['operator'], flag['second'], flag['clear'] = False, False, False, False
info = "Enter first number"
Thread(target=say_text, args=(info,)).start()
elif second != '':
flag['second'] = True
info = "Clear screen"
#Thread(target=say_text, args=(info,)).start()
second = ''
flag['clear'] = True
calc_text += "= "+str(eval(calc_text))
if is_voice_on:
speech = calc_text
speech = speech.replace('-', ' minus ')
speech = speech.replace('/', ' divided by ')
speech = speech.replace('**', ' raised to the power ')
speech = speech.replace('*', ' multiplied by ')
speech = speech.replace('%', ' mod ')
speech = speech.replace('>>', ' bitwise right shift ')
                            speech = speech.replace('<<', ' bitwise left shift ')
speech = speech.replace('&', ' bitwise and ')
speech = speech.replace('|', ' bitwise or ')
Thread(target=say_text, args=(speech,)).start()
elif first != '':
flag['first'] = True
info = "Enter operator"
Thread(target=say_text, args=(info,)).start()
first = ''
elif pred_text != "Best of Luck " and pred_text.isnumeric():
if flag['first'] == False:
if count_same_frames > 15:
count_same_frames = 0
Thread(target=say_text, args=(pred_text,)).start()
first += pred_text
calc_text += pred_text
elif flag['operator'] == False:
operator = get_operator(pred_text)
if count_same_frames > 15:
count_same_frames = 0
flag['operator'] = True
calc_text += operator
info = "Enter second number"
Thread(target=say_text, args=(info,)).start()
operator = ''
elif flag['second'] == False:
if count_same_frames > 15:
Thread(target=say_text, args=(pred_text,)).start()
second += pred_text
calc_text += pred_text
count_same_frames = 0
if count_clear_frames == 30:
first, second, operator, pred_text, calc_text = '', '', '', '', ''
flag['first'], flag['operator'], flag['second'], flag['clear'] = False, False, False, False
info = "Enter first number"
Thread(target=say_text, args=(info,)).start()
count_clear_frames = 0
blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
cv2.putText(blackboard, "Calculator Mode", (100, 50), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (255, 0,0))
cv2.putText(blackboard, "Predicted text- " + pred_text, (30, 100), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 0))
cv2.putText(blackboard, "Operator " + operator, (30, 140), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 127))
cv2.putText(blackboard, calc_text, (30, 240), cv2.FONT_HERSHEY_TRIPLEX, 2, (255, 255, 255))
cv2.putText(blackboard, info, (30, 440), cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 255, 255) )
if is_voice_on:
cv2.putText(blackboard, "Voice on", (450, 440), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 127, 0))
else:
cv2.putText(blackboard, "Voice off", (450, 440), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 127, 0))
cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
res = np.hstack((img, blackboard))
cv2.imshow("Recognizing gesture", res)
cv2.imshow("thresh", thresh)
keypress = cv2.waitKey(1)
if keypress == ord('q') or keypress == ord('t'):
break
if keypress == ord('v') and is_voice_on:
is_voice_on = False
elif keypress == ord('v') and not is_voice_on:
is_voice_on = True
if keypress == ord('t'):
return 1
else:
return 0
def text_mode(cam):
global is_voice_on
text = ""
word = ""
count_same_frame = 0
while True:
img = cam.read()[1]
img, contours, thresh = get_img_contour_thresh(img)
old_text = text
if len(contours) > 0:
contour = max(contours, key = cv2.contourArea)
if cv2.contourArea(contour) > 10000:
text = get_pred_from_contour(contour, thresh)
if old_text == text:
count_same_frame += 1
else:
count_same_frame = 0
if count_same_frame > 20:
if len(text) == 1:
Thread(target=say_text, args=(text, )).start()
word = word + text
if word.startswith('I/Me '):
word = word.replace('I/Me ', 'I ')
elif word.endswith('I/Me '):
word = word.replace('I/Me ', 'me ')
count_same_frame = 0
elif cv2.contourArea(contour) < 1000:
if word != '':
#print('yolo')
#say_text(text)
Thread(target=say_text, args=(word, )).start()
text = ""
word = ""
else:
if word != '':
#print('yolo1')
#say_text(text)
Thread(target=say_text, args=(word, )).start()
text = ""
word = ""
blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
cv2.putText(blackboard, "Text Mode", (180, 50), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (255, 0,0))
cv2.putText(blackboard, "Predicted text- " + text, (30, 100), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 0))
cv2.putText(blackboard, word, (30, 240), cv2.FONT_HERSHEY_TRIPLEX, 2, (255, 255, 255))
if is_voice_on:
cv2.putText(blackboard, "Voice on", (450, 440), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 127, 0))
else:
cv2.putText(blackboard, "Voice off", (450, 440), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 127, 0))
cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
res = np.hstack((img, blackboard))
cv2.imshow("Recognizing gesture", res)
cv2.imshow("thresh", thresh)
keypress = cv2.waitKey(1)
if keypress == ord('q') or keypress == ord('c'):
break
if keypress == ord('v') and is_voice_on:
is_voice_on = False
elif keypress == ord('v') and not is_voice_on:
is_voice_on = True
if keypress == ord('c'):
return 2
else:
return 0
def recognize():
cam = cv2.VideoCapture(1)
text = ""
word = ""
count_same_frame = 0
keypress = 1
while True:
if keypress == 1:
keypress = text_mode(cam)
elif keypress == 2:
keypress = calculator_mode(cam)
else:
break
keras_predict(model, np.zeros((50, 50), dtype = np.uint8))
recognize()
|
[
"dibakarsaha1234@gmail.com"
] |
dibakarsaha1234@gmail.com
|
0417966873101d007e1b61287289683ef4cf0d95
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/176/usersdata/268/95673/submittedfiles/funcoes1.py
|
c4843a0faedf2a2c409fff2928c28ecfd8e0d42d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
def crescente(a):
    # write the body of the crescente (strictly increasing) function here
    cont = 0
    for i in range(1, len(a)):
        if a[i] > a[i-1]:
            cont = cont + 1
        else:
            break
    if cont == len(a) - 1:
        return True
    else:
        return False
# write the remaining functions
# main program
n = int(input('Enter the number of terms of the first list: '))
a = []
for i in range(0, n, 1):
    valor = int(input('Enter a term: '))
a.append(valor)
print(crescente(a))
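# A hedged aside (not part of the original submission): the same strictly-
# increasing check can be written in one line; `pairwise_increasing` is a
# hypothetical name used only for this sketch.
def pairwise_increasing(seq):
    # compare each element with its successor; True for empty and 1-element lists
    return all(x < y for x, y in zip(seq, seq[1:]))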
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4a6a14e3b94074f6ab68372f01161bebb682e5a4
|
fdd7a5e43c95d806d59e1f035958696938586142
|
/abc182/e.py
|
f21006ed7d92f595de95849a667766479df97e30
|
[] |
no_license
|
AnDeriens/atcoder
|
beadd07eec86e933ad5a6f53006a374a99b1d5b8
|
fa6621b2441458dd4889680569ae253aa2a9764b
|
refs/heads/main
| 2023-07-07T21:27:38.547692
| 2021-08-14T03:55:14
| 2021-08-14T03:55:14
| 304,101,308
| 1
| 0
| null | 2021-07-18T06:51:40
| 2020-10-14T18:25:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
h, w, n, m = list(map(int, input().split()))
denkyu = [()] * n
chizu = [['.' for _ in range(w)] for _ in range(h)]
ans_chizu = [['.' for _ in range(w)] for _ in range(h)]
for i in range(n):
x, y = list(map(int, input().split()))
denkyu[i] = (x, y)
for _ in range(m):
x, y = list(map(int, input().split()))
    chizu[y-1][x-1] = '|'  # mark the blocked cell (row y-1, col x-1)
for (a, b) in denkyu:
a -= 1
b -= 1
for x in range(a, -1, -1):
if chizu[b][x] == '.':
ans_chizu[b][x] = '*'
elif chizu[b][x] == '|':
break
for x in range(a, w):
if chizu[b][x] == '.':
ans_chizu[b][x] = '*'
elif chizu[b][x] == '|':
break
for y in range(b, -1, -1):
if chizu[y][a] == '.':
ans_chizu[y][a] = '*'
elif chizu[y][a] == '|':
break
    for y in range(b, h):
if chizu[y][a] == '.':
ans_chizu[y][a] = '*'
elif chizu[y][a] == '|':
break
ans = 0
for x in range(w):
for y in range(h):
if ans_chizu[y][x] == '*':
ans += 1
print(ans)
|
[
"katsuya.ando496@gmail.com"
] |
katsuya.ando496@gmail.com
|
b428b9059469e307dc8c124605968249006bff10
|
ef76f2d5cc7fa95f2670c79355ae8a3881209cd2
|
/main.py
|
5fad1c5532740db2e6f848b309f83c9d3f849050
|
[] |
no_license
|
erfanmoghaddam/KakuroPuzzle
|
4a2c0039fec8e7597241482d3763c69cc31c26ba
|
579d29bcfbc623ec95ea0b6d0a29fe4958204835
|
refs/heads/main
| 2023-04-04T13:54:50.642194
| 2021-04-18T02:27:46
| 2021-04-18T02:27:46
| 343,030,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
import sys
from tkinter import Tk
from Model.kakuro import KakuroRandomGame
from Model.kakuro import KakuroCustomGame
from View.kakuroUI import KakuroUI
from View.login import main_account_screen
MARGIN = 20
SIDE = 50
WIDTH = HEIGHT = MARGIN * 2 + SIDE * 9
if __name__ == '__main__':
    mode = sys.argv[1] if len(sys.argv) == 2 else None
    if mode is None:
        print ("Wrong number of arguments! Enter mode (custom or random) to run in as argument.\n"
            "Choosing random...\n")
        mode = 'random'
    elif mode not in ('random', 'custom'):
        print ("Choosing random mode to start the game.")
        mode = 'random'
    main_account_screen()
    game = KakuroCustomGame() if mode == 'custom' else KakuroRandomGame()
    root = Tk()
    ui = KakuroUI(root, game)
    root.geometry("%dx%d" % (WIDTH, HEIGHT + 40))
    root.mainloop()
|
[
"noreply@github.com"
] |
erfanmoghaddam.noreply@github.com
|
34f8525b93edf03f4a60ddccf6a1f187ff72a4c0
|
7e653f1ad58895dafb45704cebe15305440524cb
|
/copter_control_pkg/scripts/mission_realizer.py
|
d2354fbc31e6a357cbe639ad9a6537b16b0bfae9
|
[] |
no_license
|
VladislavBakaev/copter_airsim_ros
|
2200c2e436db43168070bfcf6727d16915a6e2fc
|
98550eebb0138d667e8e8cf60abbf6d2507b9176
|
refs/heads/master
| 2023-08-23T07:45:55.743397
| 2021-10-18T08:44:36
| 2021-10-18T08:44:36
| 415,342,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
#!/usr/bin/python3
import rospy
import rospkg
import os
import re
from airsim_ros_pkgs.srv import SetGPSPosition, Takeoff, SetLocalPosition, Land
class MissionRealizer():
def __init__(self, mission_file, vehicle_name) -> None:
self.mission_file = mission_file
self.vehicle_name = vehicle_name
self.mission_point = {}
self.set_gps_mission = rospy.ServiceProxy('/airsim_node/gps_goal', SetGPSPosition)
self.takeOff = rospy.ServiceProxy('/airsim_node/drone/takeoff', Takeoff)
        self.land = rospy.ServiceProxy('/airsim_node/drone/land', Land)
self.local_move = rospy.ServiceProxy('/airsim_node/local_position_goal', SetLocalPosition)
rospy.wait_for_service('/airsim_node/gps_goal')
self.loadMission()
self.realize()
def loadMission(self):
with open(self.mission_file, 'r') as txt_file:
data = txt_file.read()
data = re.findall(r'\[(\d+)\]\s(.*)\s(.*)\s(.*)\s(.*)\s(.*)\s(.*)', data)
for point in data:
self.mission_point[point[0]] = {}
for i in range(1, len(point)):
param = point[i].split('=')
self.mission_point[point[0]].update({param[0]:float(param[1])})
self.iter_seq = sorted(self.mission_point.keys())
def realize(self):
self.takeOff(True)
self.local_move(0.0, 0.0, -5.0, 0.0, self.vehicle_name)
for i in range(len(self.mission_point)):
while (True):
try:
point = self.mission_point[self.iter_seq[i]]
lat = point['Lat']
lon = point['Lon']
alt = point['Alt']
yaw = point['Yaw']
self.set_gps_mission(lat, lon, alt, yaw, self.vehicle_name)
break
except KeyboardInterrupt:
return
except:
rospy.loginfo('Wait... Point'+str(i))
rospy.sleep(0.5)
self.land(True)
if __name__=="__main__":
rospy.init_node('mission_realizer_node', disable_signals=True)
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('copter_control_pkg')
file_path = os.path.join(pkg_path, 'mission', 'mission.txt')
vehicle_name = ""
while(vehicle_name == ""):
vehicle_name = rospy.get_param('/vehicle_name', vehicle_name)
rospy.loginfo("Wait vechical name")
mission_realizer = MissionRealizer(file_path, vehicle_name)
|
[
"bakaev.98@bk.ru"
] |
bakaev.98@bk.ru
|
73ee5bc743792d7853071e8aaddc341684714daf
|
8077a99a3578c6486606a48e0d2adfa5ca7da244
|
/dogsvscats/src/models/resnet.py
|
7530b7675b73777b0d6cd18ea1fdcbc34f108a8e
|
[] |
no_license
|
lovekesh-thakur/nnet_experiments
|
0f1b0395ca291fbbaacbb9849ff06734435ff4ab
|
1adb2bfe256db91ad9d8f9cc36b7cf2f08a4a1b7
|
refs/heads/main
| 2023-06-07T05:49:33.327626
| 2021-06-19T18:01:07
| 2021-06-19T18:01:07
| 343,152,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,308
|
py
|
# resnet architecture here
import torch
from torch import nn
from torch.nn import functional as F
class Residual(nn.Module):
"""
Residual block in Resnet Architecture
"""
def __init__(self, input_channels, output_channels, downsample = False):
"""
Args:
-----
input_channels : number of channels from the input
output_channels : number of output channels
downsample : True if we want to reduce feature size else False
"""
super(Residual, self).__init__()
self.downsample = downsample
if self.downsample:
strides = 2
else:
strides = 1
self.conv1 = nn.Conv2d(input_channels, output_channels,
kernel_size=3, stride=strides, padding=1)
self.conv2 = nn.Conv2d(output_channels, output_channels,
kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(output_channels)
self.bn2 = nn.BatchNorm2d(output_channels)
if self.downsample:
self.conv1x1 = nn.Conv2d(input_channels, output_channels, kernel_size=1,
stride=strides)
def forward(self, x):
"""
Forward method of Residual
Args:
-------
x : tensor of shape [N, C, H, W]
"""
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
if self.downsample:
return F.relu(self.conv1x1(x) + out)
else:
return F.relu(out) + x
class Resnet34(nn.Module):
"""
Resnet 34 architecture
"""
def __init__(self, blocks = [3, 4, 6, 3], classes = 2):
super().__init__()
self.logSoftmax = nn.LogSoftmax(dim=1)
self.conv1 = nn.Conv2d(3, 64, kernel_size=5, stride = 2)
self.pool = nn.MaxPool2d(kernel_size=2)
inp_channels = 64
resid_blks = []
for ind, blk in enumerate(blocks):
if ind == 0:
out_channels = 1*inp_channels
else:
out_channels = 2*inp_channels
for i in range(blk):
if i == 0:
residual_block = Residual(inp_channels, out_channels, downsample=True)
else:
residual_block = Residual(out_channels, out_channels)
resid_blks.append(residual_block)
inp_channels = out_channels
self.subnet = nn.Sequential(*resid_blks)
self.global_pooling = nn.AvgPool2d(kernel_size=5)
self.fc1 = nn.Linear(out_channels, 1)
def forward(self, x):
out = self.pool(F.relu(self.conv1(x)))
out = self.global_pooling(self.subnet(out))
out = torch.flatten(out, 1)
out = self.fc1(out)
return torch.squeeze(out)
class Resnet18(nn.Module):
"""
    Resnet 18 architecture
"""
def __init__(self, blocks = [2, 2, 2, 2], classes = 2):
super().__init__()
self.logSoftmax = nn.LogSoftmax(dim=1)
self.conv1 = nn.Conv2d(3, 64, kernel_size=5, stride = 2)
self.pool = nn.MaxPool2d(kernel_size=2)
inp_channels = 64
resid_blks = []
for ind, blk in enumerate(blocks):
if ind == 0:
out_channels = 1*inp_channels
else:
out_channels = 2*inp_channels
for i in range(blk):
if i == 0:
residual_block = Residual(inp_channels, out_channels, downsample=True)
else:
residual_block = Residual(out_channels, out_channels)
resid_blks.append(residual_block)
inp_channels = out_channels
self.subnet = nn.Sequential(*resid_blks)
self.global_pooling = nn.AvgPool2d(kernel_size=5)
self.fc1 = nn.Linear(out_channels, 1)
def forward(self, x):
out = self.pool(F.relu(self.conv1(x)))
out = self.global_pooling(self.subnet(out))
out = torch.flatten(out, 1)
out = self.fc1(out)
return torch.squeeze(out)
if __name__ == '__main__':
x = torch.ones((10, 3, 300, 300))
model = Resnet18()
print(model(x).shape)
|
[
"love.aiesec@gmail.com"
] |
love.aiesec@gmail.com
|
5f9894965d438460727d5f0e802821c2978f9d07
|
18d63fa314d8ca26042f69c086740a96039e6d3d
|
/3584/main.py
|
68ed7029890ec658cedfc93d25e0edc6672a9ddd
|
[] |
no_license
|
AswinBlue/acmicpc
|
58df7ce0c1313eeff4412b9067c1a2bc185f7041
|
02d719395d047d5c070b969710f9bc35346218e8
|
refs/heads/master
| 2023-01-23T17:13:55.816676
| 2023-01-17T12:32:50
| 2023-01-18T13:38:04
| 225,577,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
# https://www.acmicpc.net/problem/3584
# nearest common ancestor (LCA)
from sys import stdin, stdout
MAX_N = 10000
T = int(stdin.readline())
for t in range(T):
N = int(stdin.readline())
parent = [None for _ in range(N)]
depth = [0 for _ in range(N)]
    # read the edges
for n in range(N-1):
a, b = map(int, stdin.readline().split())
parent[b-1] = a-1
    root = parent.index(None) # the node with no parent is the root
    # compute depths, O(n*log(n))
for i in range(0, N):
ptr = i
d = 0
        # skip nodes whose depth is already computed
if depth[ptr] != 0:
continue
        # walk up toward the root, counting depth
while ptr != None:
ptr = parent[ptr]
d += 1
        # assign the depth to this node and its ancestors
ptr = i
while ptr != None:
depth[ptr] = d
d -= 1
ptr = parent[ptr]
a, b = map(int, stdin.readline().split())
a -= 1
b -= 1
    # bring both nodes to the same depth
while depth[a] > depth[b]:
a = parent[a]
while depth[a] < depth[b]:
b = parent[b]
while a != b:
a = parent[a]
b = parent[b]
    # print the result
stdout.write('{}\n'.format(a+1))
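# A hedged aside (not part of the original solution): the same LCA query can
# also be answered with an ancestor set instead of depth balancing.
# `lca_by_ancestor_set` is a hypothetical helper name used only for this sketch.
def lca_by_ancestor_set(a, b, parent):
    # collect a and all of its ancestors, then walk up from b until we hit one
    ancestors = set()
    while a is not None:
        ancestors.add(a)
        a = parent[a]
    while b not in ancestors:
        b = parent[b]
    return b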
|
[
"aswindblew@gmail.com"
] |
aswindblew@gmail.com
|
33c3048b71874b62d0d4769e5907f4d68610b7b4
|
367ccdea467abe95ec510e70cdf47e951398f769
|
/daemon.py
|
25e95c4ae9a6a197424e4dfdfc0f9fe61cefcc74
|
[] |
no_license
|
ecarrera72/aula-control
|
40d399ba278616d85f953d021a977677bae72913
|
aa4e3d20775a3dd42497702b9e518942009599c1
|
refs/heads/main
| 2023-07-11T15:04:55.025204
| 2021-08-23T19:59:41
| 2021-08-23T19:59:41
| 399,231,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
from time import sleep, strftime
import json
import os
class daemon:
def __init__(self) -> None:
print( "{} Inicia Daemon".format( strftime( '%d-%m-%Y %X' ) ) )
path = os.path.dirname( __file__ )
with open( os.path.join( path, 'config.json' ), 'r' ) as file:
config = json.load( file )
while True:
try:
for process in config['SYSTEM']['PROCESS']:
self.checkProcess( os.path.join( path, process ) )
sleep( 5 )
except Exception as e:
print( e )
def checkProcess( self, process ):
if os.popen( "ps ax | grep -v grep | grep " + process ).read() == "":
print( "Iniciando " + process )
os.system( 'gnome-terminal -- python3 ' + process )
else:
            print( 'OK {} {}'.format( strftime( '%d-%m-%Y %X' ), process ) )
daemon()
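# A hedged aside (not part of the original file): config.json is not included
# in this snapshot; from the lookups above (config['SYSTEM']['PROCESS']) its
# assumed shape is roughly:
#
#     {
#         "SYSTEM": {
#             "PROCESS": ["worker_a.py", "worker_b.py"]
#         }
#     }
#
# where each PROCESS entry is a script path relative to this file's directory.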
|
[
"ecarrera@aztektec.com.mx"
] |
ecarrera@aztektec.com.mx
|
98d3c3fa9303411183aac498a527848057897958
|
2cd2c8b22fbb49a3365be2171834dffd671c31ba
|
/bbqudasite/migrations/0003_auto_20200915_1652.py
|
bf366290b4a9db986d305f11e619155d5364fb29
|
[] |
no_license
|
0x305/bbquda_web
|
e570f983af8695858f34220e11ae1bb574b2de16
|
7dca277fff0311859493e073009457267ea437bd
|
refs/heads/master
| 2023-04-03T22:12:24.954927
| 2021-04-12T12:29:11
| 2021-04-12T12:29:11
| 295,261,336
| 1
| 3
| null | 2021-04-12T12:29:12
| 2020-09-14T00:11:22
|
HTML
|
UTF-8
|
Python
| false
| false
| 685
|
py
|
# Generated by Django 3.1.1 on 2020-09-15 16:52
import bbqudasite.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bbqudasite', '0002_auto_20200914_1733'),
]
operations = [
migrations.AlterField(
model_name='csvupload',
name='file',
field=models.FileField(upload_to='media/csv/', validators=[bbqudasite.models.csv_file_validator]),
),
migrations.AlterField(
model_name='csvupload',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"me@hackerrank.com"
] |
me@hackerrank.com
|
bcab8414bbabc8f8f28964cda6b4c934df2d08fb
|
2429127550540dec33ed703f5056c9dd7af72a3c
|
/test26.py
|
8b96c91b42cceaa6106a51a5957bf51741915b6f
|
[] |
no_license
|
hehe1004/FindCarNumber
|
c621001f999e2e076defefa232c3a368e500a7f4
|
249983c638b6cc001039f51361a2d864ef6cbf3f
|
refs/heads/main
| 2023-08-18T23:37:20.773435
| 2021-10-01T07:36:25
| 2021-10-01T07:36:25
| 412,371,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,113
|
py
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pytesseract
plt.style.use('dark_background')
img_ori = cv2.imread('1.jpg')
height, width, channel = img_ori.shape
plt.figure(figsize=(12, 10))
plt.imshow(img_ori, cmap='gray')
# hsv = cv2.cvtColor(img_ori, cv2.COLOR_BGR2HSV)
# gray = hsv[:,:,2]
gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)
plt.figure(figsize=(12, 10))
plt.imshow(gray, cmap='gray')
structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, structuringElement)
imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, structuringElement)
imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)
gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)
plt.figure(figsize=(12, 10))
plt.imshow(gray, cmap='gray')
img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)
img_thresh = cv2.adaptiveThreshold(
img_blurred,
maxValue=255.0,
adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
thresholdType=cv2.THRESH_BINARY_INV,
blockSize=19,
C=9
)
plt.figure(figsize=(12, 10))
plt.imshow(img_thresh, cmap='gray')
contours, _ = cv2.findContours(
img_thresh,
mode=cv2.RETR_LIST,
method=cv2.CHAIN_APPROX_SIMPLE
)
temp_result = np.zeros((height, width, channel), dtype=np.uint8)
cv2.drawContours(temp_result, contours=contours, contourIdx=-1, color=(255, 255, 255))
plt.figure(figsize=(12, 10))
plt.imshow(temp_result)
temp_result = np.zeros((height, width, channel), dtype=np.uint8)
contours_dict = []
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
cv2.rectangle(temp_result, pt1=(x, y), pt2=(x + w, y + h), color=(255, 255, 255), thickness=2)
# insert to dict
contours_dict.append({
'contour': contour,
'x': x,
'y': y,
'w': w,
'h': h,
'cx': x + (w / 2),
'cy': y + (h / 2)
})
plt.figure(figsize=(12, 10))
plt.imshow(temp_result, cmap='gray')
MIN_AREA = 80
MIN_WIDTH, MIN_HEIGHT = 2, 8
MIN_RATIO, MAX_RATIO = 0.25, 1.0
possible_contours = []
cnt = 0
for d in contours_dict:
area = d['w'] * d['h']
ratio = d['w'] / d['h']
if area > MIN_AREA \
and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT \
and MIN_RATIO < ratio < MAX_RATIO:
d['idx'] = cnt
cnt += 1
possible_contours.append(d)
# visualize possible contours
temp_result = np.zeros((height, width, channel), dtype=np.uint8)
for d in possible_contours:
# cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
cv2.rectangle(temp_result, pt1=(d['x'], d['y']), pt2=(d['x'] + d['w'], d['y'] + d['h']), color=(255, 255, 255),
thickness=2)
plt.figure(figsize=(12, 10))
plt.imshow(temp_result, cmap='gray')
MAX_DIAG_MULTIPLYER = 5 # 5
MAX_ANGLE_DIFF = 12.0 # 12.0
MAX_AREA_DIFF = 0.5 # 0.5
MAX_WIDTH_DIFF = 0.8
MAX_HEIGHT_DIFF = 0.2
MIN_N_MATCHED = 3 # 3
def find_chars(contour_list):
matched_result_idx = []
for d1 in contour_list:
matched_contours_idx = []
for d2 in contour_list:
if d1['idx'] == d2['idx']:
continue
dx = abs(d1['cx'] - d2['cx'])
dy = abs(d1['cy'] - d2['cy'])
diagonal_length1 = np.sqrt(d1['w'] ** 2 + d1['h'] ** 2)
distance = np.linalg.norm(np.array([d1['cx'], d1['cy']]) - np.array([d2['cx'], d2['cy']]))
if dx == 0:
angle_diff = 90
else:
angle_diff = np.degrees(np.arctan(dy / dx))
area_diff = abs(d1['w'] * d1['h'] - d2['w'] * d2['h']) / (d1['w'] * d1['h'])
width_diff = abs(d1['w'] - d2['w']) / d1['w']
height_diff = abs(d1['h'] - d2['h']) / d1['h']
if distance < diagonal_length1 * MAX_DIAG_MULTIPLYER \
and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF \
and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:
matched_contours_idx.append(d2['idx'])
# append this contour
matched_contours_idx.append(d1['idx'])
if len(matched_contours_idx) < MIN_N_MATCHED:
continue
matched_result_idx.append(matched_contours_idx)
unmatched_contour_idx = []
for d4 in contour_list:
if d4['idx'] not in matched_contours_idx:
unmatched_contour_idx.append(d4['idx'])
unmatched_contour = np.take(possible_contours, unmatched_contour_idx)
# recursive
recursive_contour_list = find_chars(unmatched_contour)
for idx in recursive_contour_list:
matched_result_idx.append(idx)
break
return matched_result_idx
result_idx = find_chars(possible_contours)
matched_result = []
for idx_list in result_idx:
matched_result.append(np.take(possible_contours, idx_list))
# visualize possible contours
temp_result = np.zeros((height, width, channel), dtype=np.uint8)
for r in matched_result:
for d in r:
# cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
cv2.rectangle(temp_result, pt1=(d['x'], d['y']), pt2=(d['x'] + d['w'], d['y'] + d['h']), color=(255, 255, 255),
thickness=2)
plt.figure(figsize=(12, 10))
plt.imshow(temp_result, cmap='gray')
PLATE_WIDTH_PADDING = 1.3 # 1.3
PLATE_HEIGHT_PADDING = 1.5 # 1.5
MIN_PLATE_RATIO = 3
MAX_PLATE_RATIO = 10
plate_imgs = []
plate_infos = []
for i, matched_chars in enumerate(matched_result):
sorted_chars = sorted(matched_chars, key=lambda x: x['cx'])
plate_cx = (sorted_chars[0]['cx'] + sorted_chars[-1]['cx']) / 2
plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2
plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1]['w'] - sorted_chars[0]['x']) * PLATE_WIDTH_PADDING
sum_height = 0
for d in sorted_chars:
sum_height += d['h']
plate_height = int(sum_height / len(sorted_chars) * PLATE_HEIGHT_PADDING)
triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy']
triangle_hypotenus = np.linalg.norm(
np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -
np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']])
)
angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus))
rotation_matrix = cv2.getRotationMatrix2D(center=(plate_cx, plate_cy), angle=angle, scale=1.0)
img_rotated = cv2.warpAffine(img_thresh, M=rotation_matrix, dsize=(width, height))
img_cropped = cv2.getRectSubPix(
img_rotated,
patchSize=(int(plate_width), int(plate_height)),
center=(int(plate_cx), int(plate_cy))
)
    if img_cropped.shape[1] / img_cropped.shape[0] < MIN_PLATE_RATIO or \
            img_cropped.shape[1] / img_cropped.shape[0] > MAX_PLATE_RATIO:
continue
plate_imgs.append(img_cropped)
plate_infos.append({
'x': int(plate_cx - plate_width / 2),
'y': int(plate_cy - plate_height / 2),
'w': int(plate_width),
'h': int(plate_height)
})
plt.subplot(len(matched_result), 1, i + 1)
plt.imshow(img_cropped, cmap='gray')
longest_idx, longest_text = -1, 0
plate_chars = []
for i, plate_img in enumerate(plate_imgs):
plate_img = cv2.resize(plate_img, dsize=(0, 0), fx=1.6, fy=1.6)
_, plate_img = cv2.threshold(plate_img, thresh=0.0, maxval=255.0, type=cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# find contours again (same as above)
contours, _ = cv2.findContours(plate_img, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)
plate_min_x, plate_min_y = plate_img.shape[1], plate_img.shape[0]
plate_max_x, plate_max_y = 0, 0
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
area = w * h
ratio = w / h
if area > MIN_AREA \
and w > MIN_WIDTH and h > MIN_HEIGHT \
and MIN_RATIO < ratio < MAX_RATIO:
if x < plate_min_x:
plate_min_x = x
if y < plate_min_y:
plate_min_y = y
if x + w > plate_max_x:
plate_max_x = x + w
if y + h > plate_max_y:
plate_max_y = y + h
img_result = plate_img[plate_min_y:plate_max_y, plate_min_x:plate_max_x]
img_result = cv2.GaussianBlur(img_result, ksize=(3, 3), sigmaX=0)
_, img_result = cv2.threshold(img_result, thresh=0.0, maxval=255.0, type=cv2.THRESH_BINARY | cv2.THRESH_OTSU)
img_result = cv2.copyMakeBorder(img_result, top=10, bottom=10, left=10, right=10,
borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))
chars = pytesseract.image_to_string(img_result, lang='kor', config='--psm 7 --oem 0')
result_chars = ''
has_digit = False
for c in chars:
if ord('가') <= ord(c) <= ord('힣') or c.isdigit():
if c.isdigit():
has_digit = True
result_chars += c
print(result_chars)
plate_chars.append(result_chars)
    if has_digit and len(result_chars) > longest_text:
        longest_idx = i
        longest_text = len(result_chars)
plt.subplot(len(plate_imgs), 1, i + 1)
plt.imshow(img_result, cmap='gray')
info = plate_infos[longest_idx]
chars = plate_chars[longest_idx]
print(chars)
img_out = img_ori.copy()
cv2.rectangle(img_out, pt1=(info['x'], info['y']), pt2=(info['x'] + info['w'], info['y'] + info['h']),
color=(255, 0, 0), thickness=2)
cv2.imwrite(chars + '.jpg', img_out)
plt.figure(figsize=(12, 10))
plt.imshow(img_out)
|
[
"noreply@github.com"
] |
hehe1004.noreply@github.com
|
6cf42da48a3ef8cd2465928fc5fc1f8dc24b655a
|
859e1747ac5b29e2b0631475573f949d949942a8
|
/8_4/traffic_forwarding/client/client_config/config.py
|
091923115ca654fbe124d842a5fc62d84a7072d9
|
[] |
no_license
|
ecjtu-liubin/flow_agent_system_Server
|
acc3b09218fd62469660b49ff4607d7219865b76
|
22e17bc0b1d905297ef5ec8c8e8e5844f9936563
|
refs/heads/master
| 2023-07-13T02:43:28.932757
| 2021-08-04T12:08:22
| 2021-08-04T12:08:22
| 389,484,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
# configuration file
import os
Server_Ip = [
    '192.168.43.30', # phone WLAN
'192.168.137.240', # tangjian Wlan
]
Server_Port = [
22222, 9999, 12345, 7890, 12813, 10086
]
Client_Ip = [
'192.168.43.30',
]
Client_Port = [
34812, 24374,
]
BASE_DIR = os.path.abspath(os.path.join(os.getcwd(), "../..")) # project root directory
SERVER_DIR = os.path.abspath(os.path.join(os.getcwd(), "..")) # parent directory
ABSOLUTE_DIR = os.path.abspath(os.path.dirname(__file__)) # current directory
mycertfile_path = os.path.join(SERVER_DIR, 'client_ssl', 'mycertfile.pem')
mykeyfile_path = os.path.join(SERVER_DIR, 'client_ssl', 'mykeyfile.pem')
SECRET_KEY = 'tangjian_liubin'
# user accounts and passwords
USER = [['liu', 'bin'], []]
|
[
"1475538083@qq..com"
] |
1475538083@qq..com
|
84882c078ee6a5d711cee7e173f7d8e21cc6c518
|
292fd59011826c5471403cda2723885804fb76aa
|
/tuplas.py
|
f4f5a3a0b1de11bb81521a7e59a08eef62779d20
|
[] |
no_license
|
francocoin1701/cursoPython_nivel15
|
3a42549e174677350438a4b12290c8f972410933
|
0b011e3cec1cbb5f046f57917a26b5df80a66965
|
refs/heads/master
| 2022-12-08T10:17:50.673062
| 2020-07-02T23:09:36
| 2020-07-02T23:09:36
| 276,762,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
# create one tuple from the three given ones; the resulting tuple must contain the pets
mamiferos = ("tigre", "gato", "leon")
aves = ("aguila", "buitre", "canario")
reptiles = ("tortuga", "serpiente")
mascotas = mamiferos[1:2] + aves[2:] + reptiles[:1]
print(mascotas)
print(aves[1])
|
[
"franco1231701@gmail.com"
] |
franco1231701@gmail.com
|
6e4aefbfd1a8d898afce8ab5377f6238075a3968
|
04b1803adb6653ecb7cb827c4f4aa616afacf629
|
/content/app/DEPS
|
bd97c0a531f6732cbd614be590ff07eb91b9425d
|
[
"BSD-3-Clause"
] |
permissive
|
Samsung/Castanets
|
240d9338e097b75b3f669604315b06f7cf129d64
|
4896f732fc747dfdcfcbac3d442f2d2d42df264a
|
refs/heads/castanets_76_dev
| 2023-08-31T09:01:04.744346
| 2021-07-30T04:56:25
| 2021-08-11T05:45:21
| 125,484,161
| 58
| 49
|
BSD-3-Clause
| 2022-10-16T19:31:26
| 2018-03-16T08:07:37
| null |
UTF-8
|
Python
| false
| false
| 433
|
include_rules = [
"+components/download",
"+content",
"+device/bluetooth",
"+device/gamepad",
# For loading V8's initial snapshot from external files.
"+gin/public/isolate_holder.h",
"+gin/public/snapshot_fd_data.h",
"+gin/v8_initializer.h",
"+services/network/public/cpp/features.h",
"+services/tracing/public/cpp",
"+services/service_manager/embedder",
"+services/service_manager/sandbox/sandbox_type.h",
]
|
[
"sunny.nam@samsung.com"
] |
sunny.nam@samsung.com
|
|
e7bf08bb88b45dd669b7d9f5d7985a49d41ab0a2
|
fb2156803dbcd96a594feb7f4e84076477247f62
|
/LeetCode/35. Search Insert Position.py
|
2e2e9c6055c43dbec65413c0953e5d3aa04ac192
|
[] |
no_license
|
dynotw/Leetcode-Lintcode
|
dc9028cc6ffdeab9274d2c5771ca43a827014c16
|
b3d8534cdabd3b58fa13df457c65fb53ba37edd8
|
refs/heads/master
| 2022-07-26T21:34:50.221844
| 2022-07-12T12:58:23
| 2022-07-12T12:58:23
| 236,271,631
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
# Question:
# Given a sorted array and a target value, return the index if the target is found.
# If not, return the index where it would be if it were inserted in order.
# You may assume no duplicates in the array.
# Answer:
from typing import List

class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
if target in nums:
return nums.index(target)
        # nums.index(target) above returns the first index where target occurs
        for i in range(len(nums)):
            # range(len(nums)) is empty for an empty list, so the loop body never runs
            if target <= nums[i]:
                return i
        # target is larger than every element in nums, so insert it at the end
        return len(nums)
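# A hedged aside (not part of the original submission): the intended O(log n)
# solution is a binary search; the standard library's bisect_left already
# returns the insert position. `search_insert_bisect` is a hypothetical name
# used only for this sketch.
from bisect import bisect_left

def search_insert_bisect(nums, target):
    # leftmost index at which target can be inserted while keeping nums sorted
    return bisect_left(nums, target)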
|
[
"noreply@github.com"
] |
dynotw.noreply@github.com
|
6e789b8f8ce98360939f8cb4f3622d7a0cfa042d
|
1b02458400aaf667e1b4b67ec28bd519ecc1217d
|
/Zookeeper/Problems/Find even/task.py
|
4750ce1d656fc8ae800c3e31d78c530956ab03e7
|
[] |
no_license
|
GeezFORCE/JetBrainsAcademy
|
f8b405f5c9cb7915a142fe7ef4fec278edd80a08
|
fbb4e6ff442f4c8ee5e13d3ce4f43884274589b4
|
refs/heads/master
| 2022-06-18T08:11:35.693219
| 2020-05-10T07:01:11
| 2020-05-10T07:01:11
| 261,398,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
n = int(input())
count = 2
while count < n:
    print(count)
    count += 2
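# A hedged aside (not part of the original submission): the same output can be
# produced with a stride in range(), assuming the task is "even numbers below n":
#
#     for even in range(2, n, 2):
#         print(even)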
|
[
"girigov39@gmail.com"
] |
girigov39@gmail.com
|
b3b28d40557b33eb2d00f07d9c79daae230d6e76
|
6202821cca2511e3e9cf2ca9aa7f9f95aebf5b71
|
/tests/qc/test_registration_qc.py
|
328fd947b52cbfc09218cad9cfd81334e58c7ce9
|
[
"BSD-2-Clause"
] |
permissive
|
rhytnen/ophys_etl_pipelines
|
7bd02cf7bc84872f8935d30e14d1b85d9c3b894e
|
cd955c77fa528b84946fa12b3d162d975a2bde8f
|
refs/heads/main
| 2023-03-02T07:25:33.160923
| 2021-02-04T00:06:52
| 2021-02-04T00:06:52
| 336,043,371
| 0
| 0
|
NOASSERTION
| 2021-02-04T18:16:35
| 2021-02-04T18:16:34
| null |
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
import pytest
import h5py
import numpy as np
import pandas as pd
from PIL import Image
from pathlib import Path
import ophys_etl.qc.registration_qc as rqc
@pytest.fixture
def rigid_motion_csv(tmp_path):
df = pd.DataFrame({
"framenumber": [0, 1, 2, 3],
"x": [1, 2, 1, -1],
"y": [2, 3, -2, 1],
"correlation": [0.01, 0.02, 0.02, 0.03]})
df_path = tmp_path / "rigid_motion.csv"
df.to_csv(df_path)
yield str(df_path)
@pytest.fixture
def videos(tmp_path):
myarr = np.random.randint(0, 1000, size=(100, 10, 10), dtype='uint16')
mypath1 = tmp_path / "video1.h5"
with h5py.File(mypath1, "w") as f:
f.create_dataset("data", data=myarr, chunks=(1, *myarr.shape[1:]))
mypath2 = tmp_path / "video2.h5"
with h5py.File(mypath2, "w") as f:
f.create_dataset("data", data=myarr, chunks=(1, *myarr.shape[1:]))
yield mypath1, mypath2
@pytest.fixture
def images(tmp_path):
myarr = np.random.randint(0, 255, size=(10, 10), dtype='uint8')
mypath1 = tmp_path / "image1.png"
with Image.fromarray(myarr) as im:
im.save(mypath1)
mypath2 = tmp_path / "image2.png"
with Image.fromarray(myarr) as im:
im.save(mypath2)
yield mypath1, mypath2
def test_registration_qc(tmp_path, images, videos, rigid_motion_csv):
"""
"""
args = {
"motion_diagnostics_output": rigid_motion_csv,
"movie_frame_rate_hz": 11.0,
"uncorrected_path": str(videos[0]),
"motion_corrected_output": str(videos[1]),
"max_projection_output": str(images[0]),
"avg_projection_output": str(images[1]),
"registration_summary_output": str(tmp_path / "summary.png"),
"motion_correction_preview_output": str(tmp_path / "preview.webm")}
reg = rqc.RegistrationQC(input_data=args, args=[])
reg.run()
for k in ['registration_summary_output',
'motion_correction_preview_output']:
        assert Path(reg.args[k]).exists()
|
[
"danielk@alleninstitute.org"
] |
danielk@alleninstitute.org
|
8dd519fac20a6e2e73b278799231b60adf4a75a2
|
b4ca4f637207810de2e8030544f428efb946fbb8
|
/ex12_triangle1.py
|
981013c8586ad67f4326907a0b4700ea9ff28654
|
[] |
no_license
|
eszkatya/test
|
8c617cdc876d207959f74afb195d2997ec4ff8f4
|
99e10b002bf7b1637e38126421d4064b974e0e76
|
refs/heads/master
| 2020-03-22T08:04:58.633374
| 2018-07-17T17:38:30
| 2018-07-17T17:38:30
| 139,743,420
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# Create a function that prints a triangle like this, use [userinput]:
# *
# **
# ***
# ****
# *****
# ******
# It should take a number as parameter that describes how many lines the triangle has
userinput = int(input("pick a number!"))
def haromszog(userinput):
    for i in range(1, userinput + 1):
        print(i * "*")
        # print("\n")
haromszog(userinput)
# in the picture it doesn't look exactly like this - couldn't you solve it, or did you want it this way?
|
[
"noreply@github.com"
] |
eszkatya.noreply@github.com
|
15022d6d0e3c946200dab0d91fbf74466d1abd6b
|
e6e9086725fe4e8494c0b5b4e2738667fac3f180
|
/Task4/Program/module/LatexGenerator.py
|
8cf75696b20c40c57766743397c2faf39ccb024d
|
[
"MIT"
] |
permissive
|
KKowalewski24/MUM
|
5c5f054f86d711feb0b3cf74b1a71ee7be5ce5b6
|
c70137e7646e33dd2c902d96ff8145e73ececc54
|
refs/heads/master
| 2023-05-14T07:57:56.590796
| 2021-06-08T12:42:09
| 2021-06-08T12:42:09
| 323,126,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,390
|
py
|
import os
from datetime import datetime
from typing import Any, List, Union
import pandas as pd
class LatexItem:
ampersand: str = " & "
    centering: str = "\\centering\n"
    float_barrier: str = "\\FloatBarrier\n"
class Table(LatexItem):
begin: str = "\\begin{table}[!htbp]\n"
back_slashes: str = "\\\\"
    hline: str = "\\hline\n"
    end_tabular: str = "\\end{tabular}\n"
    end: str = "\\end{table}\n"
def get_begin_tabular(self, table_width: int) -> str:
columns: str = ""
for i in range(table_width):
columns += "|c"
columns += "|"
return "\\begin{tabular}{" + columns + "}\n"
    def get_caption(self, text: str) -> str:
        replaced_text = replace_char_for_caption(text)
        return "\\caption\n[" + replaced_text + "]{" + replaced_text + "}\n"
    def get_label(self, label: str) -> str:
        return "\\label{" + label + "}\n"
class Image(LatexItem):
begin: str = "\\begin{figure}[!htbp]\n"
    include: str = "\\includegraphics\n"
width: str = "[width=\\textwidth,keepaspectratio]\n"
    end: str = "\\end{figure}"
def __init__(self, directory_name: str) -> None:
self.directory_name = directory_name
def get_path(self, filename: str) -> str:
return "img/" + filename
def get_latex_path(self, filename: str) -> str:
return "{" + self.get_path(filename) + ".png}\n"
    def get_caption(self, text: str) -> str:
        replaced_text = replace_char_for_caption(text)
        return "\\caption\n[" + replaced_text + "]{" + replaced_text + "}\n"
    def get_label(self, label: str) -> str:
        return "\\label{" + label + "}\n"
class LatexGenerator:
def __init__(self, dir_name: str = "") -> None:
self.dir_name = dir_name
self.table = Table()
self.image = Image(dir_name)
def generate_vertical_table_df(self, df: pd.DataFrame, filename: str) -> None:
result: str = self.table.begin + self.table.centering \
+ self.table.get_begin_tabular(len(df.columns)) + self.table.hline
header: str = ""
for i in range(len(df.columns)):
header += str(df.columns[i])
if i < len(df.columns) - 1:
header += self.table.ampersand
header += " " + self.table.back_slashes + " " + self.table.hline
body: str = ""
for i in range(len(df.values)):
for j in range(len(df.values[i])):
body += str(df.values[i][j])
            if j < len(df.values[i]) - 1:
body += self.table.ampersand
body += " " + self.table.back_slashes + " " + self.table.hline
result += header + body + self.table.end_tabular + self.table.get_caption(filename) \
+ self.table.get_label(filename) + self.table.end + self.table.float_barrier
self._save_to_file(result, filename)
def generate_vertical_table(self, header_names: List[str],
body_values: List[List[float]],
filename: str) -> None:
result: str = "\\begin{minipage}{.24\\textwidth}\n" + self.table.centering \
+ self.table.get_begin_tabular(len(header_names)) + self.table.hline
header: str = ""
for i in range(len(header_names)):
header += header_names[i]
if i < len(header_names) - 1:
header += self.table.ampersand
header += " " + self.table.back_slashes + " " + self.table.hline
body: str = ""
for i in range(len(body_values)):
for j in range(len(body_values[i])):
body += str(body_values[i][j])
if j < len(body_values[i]) - 1:
body += self.table.ampersand
body += " " + self.table.back_slashes + " " + self.table.hline
result += header + body + self.table.end_tabular + self.table.get_caption(filename) \
            + self.table.get_label(filename) + "\\end{minipage}\n\\hfill\n"
self._save_to_file(result, filename)
def generate_horizontal_table_df(self, df: pd.DataFrame, filename: str) -> None:
result: str = self.table.begin + self.table.centering \
+ self.table.get_begin_tabular(len(df.columns) + 1) + self.table.hline
header: str = ""
for i in range(len(df.columns)):
header += str(df.columns[i])
if i <= len(df.columns) - 2:
header += self.table.ampersand
result += self.table.ampersand + header + " " \
+ self.table.back_slashes + " " + self.table.hline
body: str = ""
for i in range(len(df.values)):
body += str(df.index[i]) + self.table.ampersand
for j in range(len(df.values[i])):
body += str(df.values[i][j])
if j < len(df.values[i]) - 1:
body += self.table.ampersand
body += " " + self.table.back_slashes + " " + self.table.hline
result += body + self.table.end_tabular + self.table.get_caption(filename) \
+ self.table.get_label(filename) + self.table.end + self.table.float_barrier
self._save_to_file(result, filename)
def generate_horizontal_table(self, header_names: Union[List[str], List[int]],
horizontal_column_names: Union[List[str], List[int]],
body_values: Union[List[List[str]], List[List[float]]],
filename: str) -> None:
if len(horizontal_column_names) != len(body_values):
raise Exception(
"horizontal_column_names and body_values must have equal length"
)
result: str = self.table.begin + self.table.centering \
+ self.table.get_begin_tabular(len(body_values[0]) + 1) + self.table.hline
if self._compare_array_with_matrix_rows(header_names, body_values):
header: str = ""
for i in range(len(header_names)):
header += str(header_names[i])
if i < len(header_names) - 1:
header += self.table.ampersand
result += self.table.ampersand + header + " " \
+ self.table.back_slashes + " " + self.table.hline
body: str = ""
for i in range(len(body_values)):
body += str(horizontal_column_names[i]) + self.table.ampersand
for j in range(len(body_values[i])):
body += str(body_values[i][j])
if j < len(body_values[i]) - 1:
body += self.table.ampersand
body += " " + self.table.back_slashes + " " + self.table.hline
result += body + self.table.end_tabular + self.table.get_caption(filename) \
+ self.table.get_label(filename) + self.table.end + self.table.float_barrier
self._save_to_file(result, filename)
def generate_chart_image(self, filename: str) -> None:
result: str = self.image.begin + self.image.centering \
+ self.image.include + self.image.width
result += self.image.get_latex_path(filename)
result += self.image.get_caption(self._remove_png_extension(filename))
result += self.image.get_label(self._remove_png_extension(filename))
result += self.image.end
self._save_to_file(result, filename)
def _compare_array_with_matrix_rows(self, array: List[Any], matrix: List[List[Any]]) -> bool:
for item in matrix:
if len(array) != len(item):
return False
return True
def _save_to_file(self, data: str, filename: str) -> None:
path: str = ""
if self.dir_name != "":
path = self.dir_name + "/"
if not os.path.exists(self.dir_name):
os.makedirs(self.dir_name)
path += filename + "-" + datetime.now().strftime("%H%M%S") + ".txt"
with open(path, "w", encoding="UTF-8") as file:
file.write(data)
def _remove_png_extension(self, string: str) -> str:
return string.replace(".png", "")
def replace_char_for_caption(string: str) -> str:
chars: List[str] = ["-", "_"]
for char in chars:
string = string.replace(char, " ")
return string
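# A hedged usage sketch (not part of the original module): writing one
# horizontal table into ./latex_out; the metric names and numbers below are
# illustrative only.
if __name__ == "__main__":
    generator = LatexGenerator("latex_out")
    generator.generate_horizontal_table(
        header_names=["precision", "recall"],
        horizontal_column_names=["model_a", "model_b"],
        body_values=[[0.91, 0.88], [0.87, 0.90]],
        filename="example-metrics",
    )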
|
[
"szamil24@gmail.com"
] |
szamil24@gmail.com
|
d56e930b77ee750f872008c40687197a2a4370a3
|
835734b9bbe59130dd19cc0b921ae73ec65e9e28
|
/tests/sequence_problems/test_parsing.py
|
80afb3f9aab7ca84dc369e67acd989a85d413151
|
[
"MIT"
] |
permissive
|
Vikdemen/RosalindPS
|
a588e76841692cffe2e3f45333b5875160e42e59
|
05cb3c2162e569bd92a99b9be127999cae1babf7
|
refs/heads/master
| 2022-04-09T18:49:28.885834
| 2020-03-06T22:11:17
| 2020-03-06T22:11:17
| 236,085,620
| 1
| 1
|
MIT
| 2020-03-02T02:03:13
| 2020-01-24T21:20:50
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
from rps.sequence_problems.parsing import parse_fasta
def test_parse_fasta():
"""
Checks proper parsing of fasta files
:return:
"""
data = [">Tag1", "ATGC", "CGTA", "GGCC", ">Tag2", "ATGC", "AATT"]
output = parse_fasta(data)
output = [(line.sequence, line.tag) for line in output]
expected = [("ATGCCGTAGGCC", "Tag1"), ("ATGCAATT", "Tag2")]
assert output == expected
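# A hedged aside (not part of the original test): a minimal parse_fasta sketch
# consistent with the expectation above. `FastaRecord` and `parse_fasta_sketch`
# are hypothetical names; the real implementation lives in
# rps.sequence_problems.parsing.
from collections import namedtuple

FastaRecord = namedtuple("FastaRecord", ["sequence", "tag"])

def parse_fasta_sketch(lines):
    # lines starting with ">" open a new record; other lines extend its sequence
    records, tag, chunks = [], None, []
    for line in lines:
        if line.startswith(">"):
            if tag is not None:
                records.append(FastaRecord("".join(chunks), tag))
            tag, chunks = line[1:], []
        else:
            chunks.append(line)
    if tag is not None:
        records.append(FastaRecord("".join(chunks), tag))
    return records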
|
[
"viktor.demen@gmail.com"
] |
viktor.demen@gmail.com
|
f9f88e8a8f438d8d63664946a0b3c68b4e7a8d4b
|
20ace38b89c0ebaa0738753fcd11b0fdd4ed21cd
|
/pPb_2016_v0/tmp/src/HeavyIonsAnalysis/JetAnalysis/python/jets/akVsSoftDrop4PFJetSequence_pp_jec_cff.py
|
1b302118be177ed98c3da7ae5f015c542230a41c
|
[] |
no_license
|
ssanders50/pPb_2016_v0
|
3c32c2920067a2f8a0a7a7fadba6225babf9a905
|
9fc4ae61cf4343c88ce6666f55c0738f963754a3
|
refs/heads/master
| 2020-12-12T16:30:41.253014
| 2020-02-14T21:51:17
| 2020-02-14T21:51:17
| 234,162,163
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,546
|
py
|
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
akVsSoftDrop4PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("akVsSoftDrop4PFJets"),
matched = cms.InputTag("ak4GenJets"),
resolveByMatchQuality = cms.bool(True),
maxDeltaR = 0.4
)
akVsSoftDrop4PFmatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("akSoftDrop4GenJets"),
matched = cms.InputTag("ak4GenJets"),
resolveByMatchQuality = cms.bool(True),
maxDeltaR = 0.4
)
akVsSoftDrop4PFparton = patJetPartonMatch.clone(src = cms.InputTag("akVsSoftDrop4PFJets")
)
akVsSoftDrop4PFcorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akVsSoftDrop4PFJets"),
payload = "AK4PF_offline"
)
akVsSoftDrop4PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akVsSoftDrop4CaloJets'))
#akVsSoftDrop4PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak4GenJets'))
akVsSoftDrop4PFbTagger = bTaggers("akVsSoftDrop4PF",0.4)
#create objects locally since they dont load properly otherwise
#akVsSoftDrop4PFmatch = akVsSoftDrop4PFbTagger.match
akVsSoftDrop4PFparton = patJetPartonMatch.clone(src = cms.InputTag("akVsSoftDrop4PFJets"), matched = cms.InputTag("genParticles"))
akVsSoftDrop4PFPatJetFlavourAssociationLegacy = akVsSoftDrop4PFbTagger.PatJetFlavourAssociationLegacy
akVsSoftDrop4PFPatJetPartons = akVsSoftDrop4PFbTagger.PatJetPartons
akVsSoftDrop4PFJetTracksAssociatorAtVertex = akVsSoftDrop4PFbTagger.JetTracksAssociatorAtVertex
akVsSoftDrop4PFJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akVsSoftDrop4PFSimpleSecondaryVertexHighEffBJetTags = akVsSoftDrop4PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akVsSoftDrop4PFSimpleSecondaryVertexHighPurBJetTags = akVsSoftDrop4PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akVsSoftDrop4PFCombinedSecondaryVertexBJetTags = akVsSoftDrop4PFbTagger.CombinedSecondaryVertexBJetTags
akVsSoftDrop4PFCombinedSecondaryVertexV2BJetTags = akVsSoftDrop4PFbTagger.CombinedSecondaryVertexV2BJetTags
akVsSoftDrop4PFJetBProbabilityBJetTags = akVsSoftDrop4PFbTagger.JetBProbabilityBJetTags
akVsSoftDrop4PFSoftPFMuonByPtBJetTags = akVsSoftDrop4PFbTagger.SoftPFMuonByPtBJetTags
akVsSoftDrop4PFSoftPFMuonByIP3dBJetTags = akVsSoftDrop4PFbTagger.SoftPFMuonByIP3dBJetTags
akVsSoftDrop4PFTrackCountingHighEffBJetTags = akVsSoftDrop4PFbTagger.TrackCountingHighEffBJetTags
akVsSoftDrop4PFTrackCountingHighPurBJetTags = akVsSoftDrop4PFbTagger.TrackCountingHighPurBJetTags
akVsSoftDrop4PFPatJetPartonAssociationLegacy = akVsSoftDrop4PFbTagger.PatJetPartonAssociationLegacy
akVsSoftDrop4PFImpactParameterTagInfos = akVsSoftDrop4PFbTagger.ImpactParameterTagInfos
akVsSoftDrop4PFImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akVsSoftDrop4PFJetProbabilityBJetTags = akVsSoftDrop4PFbTagger.JetProbabilityBJetTags
akVsSoftDrop4PFSecondaryVertexTagInfos = akVsSoftDrop4PFbTagger.SecondaryVertexTagInfos
akVsSoftDrop4PFSimpleSecondaryVertexHighEffBJetTags = akVsSoftDrop4PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akVsSoftDrop4PFSimpleSecondaryVertexHighPurBJetTags = akVsSoftDrop4PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akVsSoftDrop4PFCombinedSecondaryVertexBJetTags = akVsSoftDrop4PFbTagger.CombinedSecondaryVertexBJetTags
akVsSoftDrop4PFCombinedSecondaryVertexV2BJetTags = akVsSoftDrop4PFbTagger.CombinedSecondaryVertexV2BJetTags
akVsSoftDrop4PFSecondaryVertexNegativeTagInfos = akVsSoftDrop4PFbTagger.SecondaryVertexNegativeTagInfos
akVsSoftDrop4PFNegativeSimpleSecondaryVertexHighEffBJetTags = akVsSoftDrop4PFbTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akVsSoftDrop4PFNegativeSimpleSecondaryVertexHighPurBJetTags = akVsSoftDrop4PFbTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akVsSoftDrop4PFNegativeCombinedSecondaryVertexBJetTags = akVsSoftDrop4PFbTagger.NegativeCombinedSecondaryVertexBJetTags
akVsSoftDrop4PFPositiveCombinedSecondaryVertexBJetTags = akVsSoftDrop4PFbTagger.PositiveCombinedSecondaryVertexBJetTags
akVsSoftDrop4PFNegativeCombinedSecondaryVertexV2BJetTags = akVsSoftDrop4PFbTagger.NegativeCombinedSecondaryVertexV2BJetTags
akVsSoftDrop4PFPositiveCombinedSecondaryVertexV2BJetTags = akVsSoftDrop4PFbTagger.PositiveCombinedSecondaryVertexV2BJetTags
akVsSoftDrop4PFSoftPFMuonsTagInfos = akVsSoftDrop4PFbTagger.SoftPFMuonsTagInfos
akVsSoftDrop4PFSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akVsSoftDrop4PFSoftPFMuonBJetTags = akVsSoftDrop4PFbTagger.SoftPFMuonBJetTags
akVsSoftDrop4PFSoftPFMuonByIP3dBJetTags = akVsSoftDrop4PFbTagger.SoftPFMuonByIP3dBJetTags
akVsSoftDrop4PFSoftPFMuonByPtBJetTags = akVsSoftDrop4PFbTagger.SoftPFMuonByPtBJetTags
akVsSoftDrop4PFNegativeSoftPFMuonByPtBJetTags = akVsSoftDrop4PFbTagger.NegativeSoftPFMuonByPtBJetTags
akVsSoftDrop4PFPositiveSoftPFMuonByPtBJetTags = akVsSoftDrop4PFbTagger.PositiveSoftPFMuonByPtBJetTags
akVsSoftDrop4PFPatJetFlavourIdLegacy = cms.Sequence(akVsSoftDrop4PFPatJetPartonAssociationLegacy*akVsSoftDrop4PFPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akVsSoftDrop4PFPatJetFlavourAssociation = akVsSoftDrop4PFbTagger.PatJetFlavourAssociation
#akVsSoftDrop4PFPatJetFlavourId = cms.Sequence(akVsSoftDrop4PFPatJetPartons*akVsSoftDrop4PFPatJetFlavourAssociation)
akVsSoftDrop4PFJetBtaggingIP = cms.Sequence(akVsSoftDrop4PFImpactParameterTagInfos *
(akVsSoftDrop4PFTrackCountingHighEffBJetTags +
akVsSoftDrop4PFTrackCountingHighPurBJetTags +
akVsSoftDrop4PFJetProbabilityBJetTags +
akVsSoftDrop4PFJetBProbabilityBJetTags
)
)
akVsSoftDrop4PFJetBtaggingSV = cms.Sequence(akVsSoftDrop4PFImpactParameterTagInfos
*
akVsSoftDrop4PFSecondaryVertexTagInfos
* (akVsSoftDrop4PFSimpleSecondaryVertexHighEffBJetTags+
akVsSoftDrop4PFSimpleSecondaryVertexHighPurBJetTags+
akVsSoftDrop4PFCombinedSecondaryVertexBJetTags+
akVsSoftDrop4PFCombinedSecondaryVertexV2BJetTags
)
)
akVsSoftDrop4PFJetBtaggingNegSV = cms.Sequence(akVsSoftDrop4PFImpactParameterTagInfos
*
akVsSoftDrop4PFSecondaryVertexNegativeTagInfos
* (akVsSoftDrop4PFNegativeSimpleSecondaryVertexHighEffBJetTags+
akVsSoftDrop4PFNegativeSimpleSecondaryVertexHighPurBJetTags+
akVsSoftDrop4PFNegativeCombinedSecondaryVertexBJetTags+
akVsSoftDrop4PFPositiveCombinedSecondaryVertexBJetTags+
akVsSoftDrop4PFNegativeCombinedSecondaryVertexV2BJetTags+
akVsSoftDrop4PFPositiveCombinedSecondaryVertexV2BJetTags
)
)
akVsSoftDrop4PFJetBtaggingMu = cms.Sequence(akVsSoftDrop4PFSoftPFMuonsTagInfos * (akVsSoftDrop4PFSoftPFMuonBJetTags
+
akVsSoftDrop4PFSoftPFMuonByIP3dBJetTags
+
akVsSoftDrop4PFSoftPFMuonByPtBJetTags
+
akVsSoftDrop4PFNegativeSoftPFMuonByPtBJetTags
+
akVsSoftDrop4PFPositiveSoftPFMuonByPtBJetTags
)
)
akVsSoftDrop4PFJetBtagging = cms.Sequence(akVsSoftDrop4PFJetBtaggingIP
*akVsSoftDrop4PFJetBtaggingSV
*akVsSoftDrop4PFJetBtaggingNegSV
# *akVsSoftDrop4PFJetBtaggingMu
)
akVsSoftDrop4PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akVsSoftDrop4PFJets"),
genJetMatch = cms.InputTag("akVsSoftDrop4PFmatch"),
genPartonMatch = cms.InputTag("akVsSoftDrop4PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akVsSoftDrop4PFcorr")),
JetPartonMapSource = cms.InputTag("akVsSoftDrop4PFPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akVsSoftDrop4PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akVsSoftDrop4PFJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akVsSoftDrop4PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akVsSoftDrop4PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akVsSoftDrop4PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("akVsSoftDrop4PFCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akVsSoftDrop4PFJetBProbabilityBJetTags"),
cms.InputTag("akVsSoftDrop4PFJetProbabilityBJetTags"),
#cms.InputTag("akVsSoftDrop4PFSoftPFMuonByPtBJetTags"),
#cms.InputTag("akVsSoftDrop4PFSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akVsSoftDrop4PFTrackCountingHighEffBJetTags"),
cms.InputTag("akVsSoftDrop4PFTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akVsSoftDrop4PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akVsSoftDrop4PFNjettiness = Njettiness.clone(
src = cms.InputTag("akVsSoftDrop4PFJets"),
R0 = cms.double( 0.4)
)
akVsSoftDrop4PFpatJetsWithBtagging.userData.userFloats.src += ['akVsSoftDrop4PFNjettiness:tau1','akVsSoftDrop4PFNjettiness:tau2','akVsSoftDrop4PFNjettiness:tau3']
akVsSoftDrop4PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akVsSoftDrop4PFpatJetsWithBtagging"),
genjetTag = 'ak4GenJets',
rParam = 0.4,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
doSubEvent = True,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akVsSoftDrop4PF"),
jetName = cms.untracked.string("akVsSoftDrop4PF"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(False),
doSubJets = cms.untracked.bool(True),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("akSoftDrop4GenJets"),
doGenTaus = True
)
akVsSoftDrop4PFJetSequence_mc = cms.Sequence(
#akVsSoftDrop4PFclean
#*
akVsSoftDrop4PFmatch
#*
#akVsSoftDrop4PFmatchGroomed
*
akVsSoftDrop4PFparton
*
akVsSoftDrop4PFcorr
*
#akVsSoftDrop4PFJetID
#*
akVsSoftDrop4PFPatJetFlavourIdLegacy
#*
#akVsSoftDrop4PFPatJetFlavourId # Use legacy algo till PU implemented
*
akVsSoftDrop4PFJetTracksAssociatorAtVertex
*
akVsSoftDrop4PFJetBtagging
*
akVsSoftDrop4PFNjettiness #No constituents for calo jets in pp. Must be removed for pp calo jets but I'm not sure how to do this transparently (Marta)
*
akVsSoftDrop4PFpatJetsWithBtagging
*
akVsSoftDrop4PFJetAnalyzer
)
akVsSoftDrop4PFJetSequence_data = cms.Sequence(akVsSoftDrop4PFcorr
*
#akVsSoftDrop4PFJetID
#*
akVsSoftDrop4PFJetTracksAssociatorAtVertex
*
akVsSoftDrop4PFJetBtagging
*
akVsSoftDrop4PFNjettiness
*
akVsSoftDrop4PFpatJetsWithBtagging
*
akVsSoftDrop4PFJetAnalyzer
)
akVsSoftDrop4PFJetSequence_jec = cms.Sequence(akVsSoftDrop4PFJetSequence_mc)
akVsSoftDrop4PFJetSequence_mb = cms.Sequence(akVsSoftDrop4PFJetSequence_mc)
akVsSoftDrop4PFJetSequence = cms.Sequence(akVsSoftDrop4PFJetSequence_jec)
akVsSoftDrop4PFJetAnalyzer.genPtMin = cms.untracked.double(1)
akVsSoftDrop4PFJetAnalyzer.jetPtMin = cms.double(1)
|
[
"ssanders@ku.edu"
] |
ssanders@ku.edu
|
b3be429a1f8d1a07612b63664c3e0c402c550e8a
|
4ce08889139ca81493262787dafda54bfddfd7b1
|
/board/models.py
|
8277b494fbb5238473807affcbc0c4e0f269b326
|
[] |
no_license
|
tawtas/messageBoard
|
ba12decea5ddc93bd8a3bb5e4237fd38d44c3ab9
|
ac37d84980e2bdc3f46f3aecec583fd26f07ba09
|
refs/heads/master
| 2020-04-25T11:36:13.792985
| 2019-02-26T16:34:31
| 2019-02-26T16:34:31
| 172,750,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Topic(models.Model):  # Topic that a user can create and that posts link to  # TODO:
title = models.CharField(max_length = 500)
class Post(models.Model):
content = models.TextField()
author = models.ForeignKey(User,on_delete=models.DO_NOTHING,)
topic = models.ForeignKey(Topic,on_delete=models.DO_NOTHING,)
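# A hedged usage sketch (not part of the original module; kept as comments
# because models.py is imported at startup, not executed as a script).
# Assuming migrations are applied and `some_user` is an existing User:
#
#     topic = Topic.objects.create(title="General")
#     Post.objects.create(content="Hello", author=some_user, topic=topic)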
|
[
"satwatmandal@gamil.com"
] |
satwatmandal@gamil.com
|
f26a934b92e61e2b0d2f96559f1732201d136f3f
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/detection/Faster_Mask_RCNN_for_PyTorch/detectron2/solver/lr_scheduler.py
|
33965c3b19c53647ac11f1a7f0ae9fe0b9ef8884
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,746
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from bisect import bisect_right
from typing import List
import torch
# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
# only on epoch boundaries. We typically use iteration based schedules instead.
# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean
# "iteration" instead.
# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating
# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it.
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestones: List[int],
gamma: float = 0.1,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
        if list(milestones) != sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. Got {}".format(milestones)
            )
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [
base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
# Different definitions of half-cosine with warmup are possible. For
# simplicity we multiply the standard half-cosine schedule by the warmup
# factor. An alternative is to start the period of the cosine at warmup_iters
# instead of at 0. In the case that warmup_iters << max_iters the two are
# very close to each other.
return [
base_lr
* warmup_factor
* 0.5
* (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
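# Illustrative sketch (an addition, not part of the original module): with
# warmup_iters << max_iters, multiplying the half-cosine by the warmup factor
# is numerically very close to the alternative mentioned above of starting the
# cosine period at warmup_iters. The numbers below are made up for the demo.
def _warmup_cosine_demo():
    T, W, t = 10000, 100, 5000  # max_iters, warmup_iters, some post-warmup iter
    v1 = 0.5 * (1.0 + math.cos(math.pi * t / T))              # scheme used here
    v2 = 0.5 * (1.0 + math.cos(math.pi * (t - W) / (T - W)))  # alternative
    return v1, v2  # ~0.5000 vs ~0.5079 -- nearly identical for W << T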
def _get_warmup_factor_at_iter(
method: str, iter: int, warmup_iters: int, warmup_factor: float
) -> float:
"""
Return the learning rate warmup factor at a specific iteration.
See :paper:`ImageNet in 1h` for more details.
Args:
method (str): warmup method; either "constant" or "linear".
iter (int): iteration at which to calculate the warmup factor.
warmup_iters (int): the number of warmup iterations.
warmup_factor (float): the base warmup factor (the meaning changes according
to the method used).
Returns:
float: the effective warmup factor at the given iteration.
"""
if iter >= warmup_iters:
return 1.0
if method == "constant":
return warmup_factor
elif method == "linear":
alpha = iter / warmup_iters
return warmup_factor * (1 - alpha) + alpha
else:
raise ValueError("Unknown warmup method: {}".format(method))
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
fcf586e2cd46c0c5b1ad72b6d2d3a3b4c7e70c3c
|
63fc7db58cef6c10622ae963e2cf7d89ffc665fe
|
/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py
|
62ab6998d058c9d420eb53289c667037aed6da07
|
[
"BSD-3-Clause"
] |
permissive
|
lexmz/coremltools
|
c3498d3a79a00806881833c61d0ad80fd226ace2
|
908ad2add3753b6b57336d6c26821e7074b9f669
|
refs/heads/main
| 2023-08-31T19:02:57.904850
| 2021-11-04T16:39:09
| 2021-11-04T16:39:09
| 424,665,865
| 0
| 0
|
BSD-3-Clause
| 2021-11-04T16:33:17
| 2021-11-04T16:33:17
| null |
UTF-8
|
Python
| false
| false
| 106,924
|
py
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import sys
import itertools
import numpy as np
from coremltools.models.utils import _python_version
from coremltools.models.utils import _macos_version
from coremltools.converters.mil import testing_reqs
from coremltools.converters.mil.testing_reqs import *
from .testing_utils import *
from coremltools import TensorType
from coremltools._deps import version_lt
pytestmark = pytest.mark.skipif(
sys.version_info >= (3, 8), reason="Segfault with Python 3.8+"
) # rdar://problem/65730375
backends = testing_reqs.backends
torch = pytest.importorskip("torch")
torch.manual_seed(30)
np.random.seed(30)
# Set of common shapes for testing. Not all layers support 1D, so these two
# set of shapes are kept separate
COMMON_SHAPES = [(1, 10), (1, 5, 6), (1, 3, 5, 6), (1, 3, 4, 5, 6)]
COMMON_SHAPES_ALL = [(1, )] + COMMON_SHAPES
class TestAffineGrid(TorchBaseTest):
@pytest.mark.parametrize(
"backend, x_shape_and_target_size, "
"sampling_mode, padding_mode, align_corners",
itertools.product(
backends,
[
# shape format: (Batch, Channel, Height, Width)
[(1, 1, 3, 3), (1, 1, 3, 3)], # no size change
[(2, 3, 5, 5), (2, 3, 3, 2)], # down-sampling
[(3, 1, 6, 6), (3, 1, 8, 8)], # up-sampling
],
["bilinear"],
["zeros"],
[True],
),
)
def test(
self,
backend,
x_shape_and_target_size,
sampling_mode,
padding_mode,
align_corners,
):
if backend[0] == "neuralnetwork":
pytest.xfail("nn backend not supported")
x_shape, target_size = x_shape_and_target_size
theta = torch.rand((x_shape[0], 2, 3))
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.affine_grid = torch.nn.functional.affine_grid
self.grid_sample = torch.nn.functional.grid_sample
def forward(self, x):
grid = self.affine_grid(
theta=theta, size=target_size, align_corners=align_corners,
)
x = self.grid_sample(
x,
grid=grid,
mode=sampling_mode,
padding_mode=padding_mode,
align_corners=align_corners,
)
return x
model = TestModule()
self.run_compare_torch(x_shape, model, backend=backend)
class TestGridSample(TorchBaseTest):
@pytest.mark.parametrize(
"backend, data_grid_shapes, mode, padding_mode, align_corners",
itertools.product(
backends,
[
# Input shape format: (Batch, C, Hin, Win)
# Grid shape format: (Batch, Hout, Wout, 2)
[(1, 1, 3, 3), (1, 3, 3, 2)], # no size change
[(2, 3, 5, 5), (2, 3, 3, 2)], # down-sampling
[(3, 1, 6, 6), (3, 8, 8, 2)], # up-sampling
],
["bilinear", "nearest"],
["zeros", "border", "reflection"],
[True, False],
),
)
def test(
self,
backend,
data_grid_shapes,
mode,
padding_mode,
align_corners,
):
if backend[0] == "neuralnetwork":
pytest.xfail("nn backend not supported")
params = {
"mode": mode,
"padding_mode": padding_mode,
"align_corners": align_corners,
}
model = ModuleWrapper(
function=torch.nn.functional.grid_sample, kwargs=params
)
self.run_compare_torch(data_grid_shapes, model, backend=backend)
class TestNLLLoss(TorchBaseTest):
@pytest.mark.parametrize(
"reduction, backend",
itertools.product(
["none", "sum", "mean"],
backends,
),
)
def test_nllloss(
self,
reduction,
backend,
):
class NLLLossModel(nn.Module):
def __init__(self):
super(NLLLossModel, self).__init__()
self.loss = nn.NLLLoss(reduction=reduction)
def forward(self, x, target):
loss = self.loss(x, target)
return loss
x = torch.randn(3, 5)
target = torch.tensor([1, 0, 4])
inputs = (x, target)
model = NLLLossModel()
expected_results = model(*inputs)
self.run_compare_torch(
inputs, model, expected_results, input_as_shape=False, backend=backend,
)
class TestArgSort(TorchBaseTest):
@pytest.mark.parametrize(
"shape, axis, descending, backend",
itertools.product(
COMMON_SHAPES,
[-1, 0],
[True, False],
backends
)
)
def test_argsort(self, shape, axis, descending, backend):
model = ModuleWrapper(
function=torch.argsort, kwargs={"dim": axis, "descending": descending}
)
TorchBaseTest.run_compare_torch(shape, model, backend=backend)
class TestSort(TorchBaseTest):
@pytest.mark.parametrize(
"shape, axis, descending, backend",
itertools.product(
COMMON_SHAPES,
[-1, 0],
[True, False],
backends
)
)
def test_sort(self, shape, axis, descending, backend):
model = ModuleWrapper(
function=torch.sort, kwargs={"dim": axis, "descending": descending}
)
TorchBaseTest.run_compare_torch(shape, model, backend=backend)
class TestBatchNorm(TorchBaseTest):
@pytest.mark.parametrize(
"num_features, eps, affine, backend",
itertools.product([5, 3, 1], [0.1, 1e-05], [True, False], backends),
)
def test_batchnorm(self, num_features, eps, affine, backend):
model = nn.BatchNorm2d(num_features, eps, affine=affine)
self.run_compare_torch((6, num_features, 5, 5), model, backend=backend)
@pytest.mark.parametrize(
"affine, backend",
itertools.product([True, False], backends),
)
def test_batchnorm_2d_with_conv(self, affine, backend):
class CRNNBase(nn.Module):
def __init__(self, ch_in, ch_out, kernel_size=3):
super(CRNNBase, self).__init__()
self.conv = nn.Conv2d(ch_in, ch_out, kernel_size=kernel_size)
self.norm = nn.BatchNorm2d(ch_out, affine=affine)
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
model = CRNNBase(ch_in=6, ch_out=16)
self.run_compare_torch((1, 6, 15, 30), model, backend=backend)
@pytest.mark.parametrize(
"num_features, eps, affine, dynamic_input, backend",
itertools.product([5, 1], [0.1, 1e-05], [True, False], ["None", "Batch", "Height", "Width", "Depth", "All"], backends),
)
def test_batchnorm_3d(self, num_features, eps, affine, dynamic_input, backend):
model = nn.BatchNorm3d(num_features, eps, affine=affine)
input_shape = (6, num_features, 2, 3, 4)
if dynamic_input == "None":
self.run_compare_torch(
input_shape,
model,
backend=backend
)
else:
            if dynamic_input == "Batch":
                converter_input_type = [TensorType(shape=(RangeDim(1, 10), num_features, 2, 3, 4), dtype=np.float32)]
elif dynamic_input == "Height":
converter_input_type = [TensorType(shape=(6, num_features, RangeDim(1, 10), 3, 4), dtype=np.float32)]
elif dynamic_input == "Width":
converter_input_type = [TensorType(shape=(6, num_features, 2, RangeDim(1, 10), 4), dtype=np.float32)]
elif dynamic_input == "Depth":
converter_input_type = [TensorType(shape=(6, num_features, 2, 3, RangeDim(1, 10)), dtype=np.float32)]
elif dynamic_input == "All":
converter_input_type = [TensorType(shape=(RangeDim(1, 10),
num_features,
RangeDim(1, 10),
RangeDim(1, 10),
RangeDim(1, 10)),
dtype=np.float32)]
self.run_compare_torch(
input_shape,
model,
backend=backend,
converter_input_type=converter_input_type
)
@pytest.mark.parametrize(
"rank, num_features, eps, training, backend",
itertools.product([3, 4, 5], [5, 1], [0.1, 1e-05], [True, False], backends),
)
def test_batchnorm_dynamic(self, rank, num_features, eps, training, backend):
model = ModuleWrapper(
nn.functional.batch_norm,
{"training": training, "eps": eps,},
)
input_shape = [6, num_features, 3, 4, 5]
input_shape = input_shape[:rank]
_input = torch.randn(*input_shape)
_mean = torch.randn(num_features)
_var = torch.randn(num_features)
inputs = (_input, _mean, _var)
expected_results = model(*inputs)
self.run_compare_torch(
inputs, model, expected_results, input_as_shape=False, backend=backend,
)
@pytest.mark.parametrize(
"affine, backend",
itertools.product([True, False], backends),
)
def test_batchnorm_1d_with_conv(self, affine, backend):
class CRNNBase(nn.Module):
def __init__(self, ch_in, ch_out, kernel_size=3):
super(CRNNBase, self).__init__()
self.conv = nn.Conv1d(ch_in, ch_out, kernel_size=kernel_size)
self.norm = nn.BatchNorm1d(ch_out, affine=affine)
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
model = CRNNBase(ch_in=6, ch_out=16)
self.run_compare_torch((1, 6, 15), model, backend=backend)
@pytest.mark.parametrize(
"shape, eps, affine, backend",
itertools.product([(1, 10), (4, 6), (10, 1)], [0.1, 1e-05], [True, False], backends),
)
def test_batchnorm1d_rank2(self, shape, eps, affine, backend):
N,C = shape
batchnorm = nn.BatchNorm1d(C, eps=eps, affine=affine).eval()
self.run_compare_torch(
(N, C), batchnorm, backend=backend,
)
@pytest.mark.parametrize(
"shape, eps, affine, backend",
itertools.product([(4, 8, 2), (1, 5, 3), (5, 10, 1), (6, 1, 4)], [0.1, 1e-05], [True, False], backends),
)
def test_batchnorm1d_rank3(self, shape, eps, affine, backend):
N,C,L = shape
batchnorm = nn.BatchNorm1d(C, eps=eps, affine=affine).eval()
self.run_compare_torch(
(N, C, L), batchnorm, backend=backend,
)
class TestInstanceNorm(TorchBaseTest):
@pytest.mark.parametrize(
"num_features, eps, backend",
itertools.product([5, 2, 1], [0.1, 1e-05], backends),
)
def test_instancenorm(self, num_features, eps, backend):
model = nn.InstanceNorm2d(num_features, eps)
self.run_compare_torch((6, num_features, 5, 5), model, backend=backend)
@pytest.mark.parametrize("num_features, backend",
itertools.product([5, 2, 1], backends),
)
def test_instancenorm_1d(self, num_features, backend):
model = nn.InstanceNorm1d(num_features)
self.run_compare_torch((6, num_features, 10), model, backend=backend)
class TestGroupNorm(TorchBaseTest):
@pytest.mark.parametrize(
"group_features, eps,affine, backend",
itertools.product([(16, 32), (1, 1)], [0.1, 1e-05],[True, False], backends),
)
def test_groupnorm(self, group_features, eps, affine, backend):
model = nn.GroupNorm(group_features[0],group_features[1], eps=eps, affine=affine)
self.run_compare_torch((6, group_features[1], 5, 5), model, backend=backend)
class TestLinear(TorchBaseTest):
@pytest.mark.parametrize(
"in_features, out_features, bias, backend",
itertools.product([5], [10], [True, False], backends),
)
def test_linear_rank1_input(self, in_features, out_features, bias, backend):
model = nn.Linear(in_features, out_features, bias=bias)
self.run_compare_torch((in_features,), model, backend=backend)
@pytest.mark.parametrize(
"in_features, out_features, bias, backend",
itertools.product([10, 25], [3, 6], [True, False], backends),
)
def test_linear_rank2_input(self, in_features, out_features, bias, backend):
model = nn.Linear(in_features, out_features, bias=bias)
self.run_compare_torch((1, in_features), model, backend=backend)
@pytest.mark.parametrize(
"in_features, out_features, bias, backend",
itertools.product([10], [6], [True, False], backends),
)
def test_linear_rank3_input(self, in_features, out_features, bias, backend):
model = nn.Linear(in_features, out_features, bias=bias)
self.run_compare_torch((1, 3, in_features), model, backend=backend)
@pytest.mark.parametrize(
"in_features, out_features, bias, backend",
itertools.product([10], [6], [True, False], backends),
)
def test_linear_rank4_input(self, in_features, out_features, bias, backend):
model = nn.Linear(in_features, out_features, bias=bias)
self.run_compare_torch((1, 5, 3, in_features), model, backend=backend)
class TestConv(TorchBaseTest):
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
[ (*param, bend) for param, bend in itertools.product([
(5, 3, 1, 1, 1, 2, 0, 1),
(3, 3, 1, 1, 1, 2, 1, 3),
(4, 3, 3, 3, 2, 2, 0, 1),
(7, 3, 3, 3, 1, 3, 0, 1),
(5, 5, 3, 3, 1, 3, 0, 1),
(3, 5, 3, 3, 1, 3, 0, 1),
(3, 5, 3, 3, 1, 3, 1, 3),
(7, 5, 3, 3, 2, 3, 1, 3),
], backends)
],
)
def test_convolution2d(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
model = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
self.run_compare_torch((1, in_channels, height, width), model,
backend=backend)
class TestDynamicConv(TorchBaseTest):
@pytest.mark.parametrize(
"width, in_channels, out_channels, kernel_size, stride, padding, backend",
[ (*param, bend) for param, bend in itertools.product([
(5, 1, 1, 1, 2, 1),
(3, 1, 1, 1, 2, 3),
(4, 3, 3, 1, 2, 1),
(7, 3, 3, 1, 3, 1),
(5, 3, 3, 2, 2, 1),
(3, 3, 3, 1, 3, 1),
(3, 3, 3, 1, 3, 3),
(7, 3, 3, 3, 1, 3),
], backends)
],
)
def test_convolution1d(
self,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
backend,
groups=1,
):
if backend[0] == 'mlprogram':
pytest.xfail("Not supported on ML Program backend")
class DynamicConv(nn.Module):
def __init__(self):
super(DynamicConv, self).__init__()
def forward(self, input_data, weights):
return nn.functional.conv1d(
input_data,
weights,
stride=stride,
padding=padding
)
model = DynamicConv()
self.run_compare_torch([(1, in_channels, width), (out_channels, int(in_channels/groups), kernel_size)],
model, backend=backend)
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
[ (*param, bend) for param, bend in itertools.product([
(5, 3, 1, 1, 1, 2, 0, 1),
(3, 3, 1, 1, 1, 2, 1, 3),
(4, 3, 3, 3, 1, 2, 0, 1),
(7, 3, 3, 3, 1, 3, 0, 1),
(5, 5, 3, 3, 2, 1, 0, 1),
(3, 5, 3, 3, 1, 3, 0, 1),
(3, 5, 3, 3, 1, 3, 1, 3),
(7, 5, 3, 3, 2, 3, 1, 3),
], backends)
],
)
def test_convolution2d(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
class DynamicConv(nn.Module):
def __init__(self):
super(DynamicConv, self).__init__()
def forward(self, input_data, weights):
return nn.functional.conv2d(
input_data,
weights,
stride=stride,
padding=padding
)
model = DynamicConv()
self.run_compare_torch([(1, in_channels, height, width), (out_channels, int(in_channels/groups), kernel_size, kernel_size)],
model, backend=backend)
class TestConvTranspose(TorchBaseTest):
@pytest.mark.parametrize(
"width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
[ (*param, bend) for param, bend in itertools.product([
(3, 1, 1, 1, 2, 0, 1),
(3, 1, 1, 1, 2, 1, 3),
(3, 3, 3, 1, 2, 0, 1),
(3, 3, 3, 1, 3, 0, 1),
(5, 3, 3, 1, 3, 0, 1),
(5, 3, 3, 1, 3, 0, 1),
(5, 3, 3, 1, 3, 1, 3),
(5, 3, 3, 1, 3, 1, 3),
], backends)
],
)
def test_convolution_transpose1d(
self,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
model = nn.ConvTranspose1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups
)
self.run_compare_torch((1, in_channels, width), model, backend=backend)
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
[ (*param, bend) for param, bend in itertools.product([
(5, 5, 1, 1, 1, 2, 0, 1),
(5, 5, 1, 1, 1, 2, 1, 3),
(5, 5, 3, 3, 1, 2, 0, 1),
(5, 5, 3, 3, 1, 3, 0, 1),
(6, 5, 3, 3, 1, 3, 0, 1),
(6, 5, 3, 3, 1, 3, 0, 1),
(6, 5, 3, 3, 1, 3, 1, 3),
(6, 5, 3, 3, 1, 3, 1, 3),
], backends)
],
)
def test_convolution_transpose2d(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
model = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
self.run_compare_torch((1, in_channels, height, width), model,
backend=backend)
@pytest.mark.parametrize(
"dynamic_input, backend",
itertools.product(
[True, False], backends
),
)
def test_convolution_transpose2d_dynamic_input(
self,
dynamic_input,
backend,
):
in_channels = 5
model = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=10,
kernel_size=3,
stride=2,
padding=1,
dilation=3,
)
in_height = 256
in_width = 512
input_shape = (1, in_channels, in_height, in_width)
if dynamic_input:
converter_input_type = [TensorType(shape=(1, in_channels, RangeDim(256, -1), RangeDim(256, -1)), dtype=np.float32)]
self.run_compare_torch(
input_shape,
model,
backend=backend,
converter_input_type=converter_input_type
)
else:
self.run_compare_torch(
input_shape,
model,
backend=backend
)
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, output_padding, backend",
[ (*param, bend) for param, bend in itertools.product([
(5, 5, 1, 1, 1, 2, 1, 1, 1),
(5, 5, 1, 1, 1, 2, 2, 3, 2),
(5, 5, 3, 3, 1, 2, 0, 1, 0),
(5, 5, 3, 3, 1, 3, 1, 1, 1),
(6, 5, 3, 3, 1, 3, 2, 1, 2),
(6, 5, 3, 3, 1, 3, 1, 1, 1),
(6, 5, 3, 3, 1, 3, 2, 3, 2),
(6, 5, 3, 3, 1, 3, 3, 3, 3),
], backends)
]+ [
pytest.param(
5, 5, 1, 1, 3, 4, 1, 1, 2, "neuralnetwork", marks=pytest.mark.xfail
),
pytest.param(
5, 5, 1, 1, 3, 2, 1, 3, 2, "neuralnetwork", marks=pytest.mark.xfail
),
],
)
def test_convolution_transpose2d_output_padding(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
output_padding,
backend,
groups=1,
):
# Output padding must be less than either stride or dilation
# Skip testing invalid combinations
if isinstance(output_padding, int):
if output_padding >= stride and output_padding >= dilation:
return
elif isinstance(output_padding, tuple):
for _output_padding in output_padding:
if _output_padding >= stride and _output_padding >= dilation:
return
model = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
output_padding=output_padding,
)
self.run_compare_torch((1, in_channels, height, width), model,
backend=backend)
@pytest.mark.parametrize(
"depth, height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
[ (*param, bend) for param, bend in itertools.product([
(3, 5, 5, 1, 1, 1, 2, 0, 1),
(3, 5, 5, 1, 1, 1, 2, 1, 3),
(3, 5, 5, 3, 3, 1, 2, 0, 1),
(3, 5, 5, 3, 3, 1, 1, 0, 2),
(4, 6, 5, 3, 3, 1, 3, 0, 1),
(4, 6, 5, 3, 3, 1, 3, 1, 2),
(4, 6, 5, 3, 3, 1, 3, 1, 3),
], backends)
],
)
def test_convolution_transpose3d(
self,
depth,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
):
model = nn.ConvTranspose3d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
self.run_compare_torch((1, in_channels, depth, height, width), model,
backend=backend)
class TestCond(TorchBaseTest):
@pytest.mark.parametrize(
"use_cpu_for_conversion, backend", itertools.product([True, False], backends)
)
def test_cond(self, use_cpu_for_conversion, backend):
if backend[0] == "mlprogram":
pytest.skip("rdar://81169758 (Cond tests hang on mlprogram backend)")
if backend[0] == "mlprogram" and not use_cpu_for_conversion:
pytest.xfail("rdar://78343191 ((MIL GPU) Core ML Tools Unit Test failures [failure to load or Seg fault])")
in_features = 1
out_features = 2
class TestNet(nn.Module):
def forward(self, x):
if torch.squeeze(x) < 10.:
return x*10.
else:
return x*2.
model = TestNet().eval()
torch_model = torch.jit.script(model)
self.run_compare_torch(torch.tensor([1.]), torch_model,
input_as_shape=False, backend=backend,
use_cpu_for_conversion=use_cpu_for_conversion)
self.run_compare_torch(torch.tensor([11.]), torch_model,
input_as_shape=False, backend=backend,
use_cpu_for_conversion=use_cpu_for_conversion)
class TestLoop(TorchBaseTest):
@pytest.mark.parametrize("backend", backends)
def test_for_loop(self, backend):
class TestLayer(nn.Module):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, x):
x = 2.0 * x
return x
class TestNet(nn.Module):
input_size = (64,)
def __init__(self):
super(TestNet, self).__init__()
layer = TestLayer()
self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
def forward(self, x):
for _ in range(7):
x = self.layer(x)
return x
model = TestNet().eval()
torch_model = torch.jit.script(model)
self.run_compare_torch(model.input_size, torch_model, backend=backend)
@pytest.mark.parametrize("backend", backends)
def test_while_loop(self, backend):
class TestLayer(nn.Module):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, x):
x = 0.5 * x
return x
class TestNet(nn.Module):
input_size = (1,)
def __init__(self):
super(TestNet, self).__init__()
layer = TestLayer()
self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
def forward(self, x):
while x > 0.01:
x = self.layer(x)
return x
model = TestNet().eval()
torch_model = torch.jit.script(model)
self.run_compare_torch(model.input_size, torch_model, backend=backend)
class TestUpsample(TorchBaseTest):
@pytest.mark.parametrize(
"output_size, align_corners, backend",
itertools.product(
[(10, 10), (1, 1), (2, 3), (190, 170)],
[True, False],
backends,
)
)
def test_upsample_bilinear2d_with_output_size(
self, output_size, align_corners, backend
):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate,
{"size": output_size, "mode": "bilinear", "align_corners": align_corners,},
)
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"scales_h, scales_w, align_corners, recompute_scale_factor, backend",
itertools.product(
[2, 0.5, 4.1], [3, 0.5, 5.3], [True, False], [True, False], backends
)
)
def test_upsample_bilinear2d_with_scales(
self, scales_h, scales_w, align_corners, recompute_scale_factor, backend
):
def _is_float_value(x, threshold=0.001):
return x - np.floor(x) > threshold
Height = 8
Width = 22
input_shape = (1, 3, Height, Width)
output_h = Height * scales_h
output_w = Width * scales_w
is_h_float = _is_float_value(output_h)
is_w_float = _is_float_value(output_w)
if (is_h_float or is_w_float) and not align_corners and not recompute_scale_factor:
pytest.xfail("rdar://81124053 (Support recompute_scale_factor)")
model = ModuleWrapper(
nn.functional.interpolate,
{
"scale_factor": (scales_h, scales_w),
"mode": "bilinear",
"align_corners": align_corners,
"recompute_scale_factor": recompute_scale_factor,
},
)
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"output_size, backend",
itertools.product(
[(10, 10), (190, 170)], backends
)
)
def test_upsample_nearest2d_with_output_size(self, output_size, backend):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate, {"size": output_size, "mode": "nearest"},
)
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"scales_h, scales_w, backend",
itertools.product([2, 3, 4.5], [4, 5, 5.5], backends),
)
def test_upsample_nearest2d_with_scales(self, scales_h, scales_w, backend):
if backend[0] == "neuralnetwork":
if isinstance(scales_h, float) or isinstance(scales_w, float):
return # Skip fractional scale factors tests for neuralnetwork
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate,
{"scale_factor": (scales_h, scales_w), "mode": "nearest"},
)
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"scales_h, scales_w, backend",
itertools.product([2, 3], [4, 5], backends),
)
def test_upsample_nearest2d_with_scales_dynamic(self, scales_h, scales_w, backend):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate,
{"scale_factor": (scales_h, scales_w), "mode": "nearest", "recompute_scale_factor": True,},
)
converter_input_type = [TensorType(shape=(1, 3, RangeDim(), RangeDim()), dtype=np.float32)]
mlmodel = self.run_compare_torch(input_shape, model,
backend=backend,
converter_input_type=converter_input_type)[1]
        # also check that the scale factors are integers
if backend[0] == 'neuralnetwork':
for layer in mlmodel._spec.neuralNetwork.layers:
if layer.WhichOneof('layer') == "upsample":
assert len(layer.upsample.fractionalScalingFactor) == 0
@pytest.mark.parametrize(
"scales_h, scales_w, align_corners, recompute_scale_factor, backend",
itertools.product(
[2, 3.6], [4, 0.7], [True, False], [True, False], backends
)
)
def test_upsample_bilinear2d_with_scales_dynamic(
self, scales_h, scales_w, align_corners, recompute_scale_factor, backend
):
def _is_float_value(x, threshold=0.001):
return x - np.floor(x) > threshold
is_h_float = _is_float_value(scales_h)
is_w_float = _is_float_value(scales_w)
input_shape = (1, 3, 9, 22)
if (is_h_float or is_w_float) and not align_corners and not recompute_scale_factor:
pytest.xfail("rdar://81124053 (Support recompute_scale_factor)")
model = ModuleWrapper(
nn.functional.interpolate,
{
"scale_factor": (scales_h, scales_w),
"mode": "bilinear",
"align_corners": align_corners,
"recompute_scale_factor": recompute_scale_factor,
},
)
converter_input_type = [TensorType(shape=(1, 3, RangeDim(default=9), RangeDim(default=22)), dtype=np.float32)]
mlmodel = self.run_compare_torch(input_shape, model,
backend=backend,
converter_input_type=converter_input_type)[1]
        # also check that the scale factors are integers
if backend[0] == 'neuralnetwork' and not is_h_float and not is_w_float:
for layer in mlmodel._spec.neuralNetwork.layers:
if layer.WhichOneof('layer') == "upsample":
assert len(layer.upsample.fractionalScalingFactor) == 0
class TestBranch(TorchBaseTest):
@pytest.mark.parametrize("backend", backends)
def test_if(self, backend):
if backend[0] == 'mlprogram':
pytest.xfail("Not supported on ML Program backend")
class TestLayer(nn.Module):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, x):
x = torch.mean(x)
return x
class TestNet(nn.Module):
input_size = (64,)
def __init__(self):
super(TestNet, self).__init__()
layer = TestLayer()
self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
def forward(self, x):
m = self.layer(x)
if m < 0:
scale = -2.0
else:
scale = 2.0
x = scale * x
return x
model = TestNet().eval()
torch_model = torch.jit.script(model)
self.run_compare_torch(model.input_size, torch_model, backend=backend)
class TestAvgPool(TorchBaseTest):
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
[ (*param, bend) for param, bend in itertools.product([
((1, 3, 5), 1, 1, 0, True, True),
((1, 3, 5), 3, 1, 0, False, True),
((1, 3, 5), 1, 2, 1, False, False),
((1, 3, 5), 3, 2, 1, False, True),
((1, 3, 5), 1, 2, 0, False, True),
((1, 3, 10), 1, 1, 1, False, False),
((1, 3, 10), 3, 1, 0, False, False),
((1, 3, 10), 1, 2, 1, True, True),
((1, 3, 10), 3, 2, 0, True, False),
((1, 3, 10), 1, 1, 1, True, True),
], backends)
],
)
def test_avg_pool1d(
self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
):
if padding > kernel_size / 2:
return
if kernel_size == 1 and stride == 2 and padding == 0 and ceil_mode and input_shape[-1] % 2 == 0:
pytest.xfail(reason="rdar://73894185 (CoreML sometimes returns 'nan's "
"for avg_pool when ceil_mode is True and kernel=1,stride=2,pad=0)")
model = nn.AvgPool1d(
kernel_size,
stride,
padding,
ceil_mode=ceil_mode,
count_include_pad=include_pad,
)
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
[ (*param, bend) for param, bend in itertools.product([
((1, 3, 5, 5), 1, 1, 0, True, True),
((1, 3, 5, 5), 3, 1, 0, False, True),
((1, 3, 5, 5), 1, 2, 1, False, False),
((1, 3, 5, 5), 3, 2, 1, False, True),
((1, 3, 5, 5), 1, 2, 0, False, True),
((1, 3, 10, 10), 1, 1, 1, False, False),
((1, 3, 10, 10), 3, 1, 0, False, False),
((1, 3, 10, 10), 1, 2, 1, True, True),
((1, 3, 10, 10), 3, 2, 0, True, False),
((1, 3, 10, 10), 1, 1, 1, True, True),
], backends)
],
)
def test_avg_pool2d(
self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
):
if padding > kernel_size / 2:
return
if kernel_size == 1 and stride == 2 and padding == 0 and ceil_mode and \
(input_shape[-2] % 2 == 0 or input_shape[-1] % 2 == 0):
pytest.xfail(reason="rdar://73894185 (CoreML sometimes returns 'nan's "
"for avg_pool when ceil_mode is True and kernel=1,stride=2,pad=0)")
model = nn.AvgPool2d(
kernel_size,
stride,
padding,
ceil_mode=ceil_mode,
count_include_pad=include_pad,
)
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
[ (*param, bend) for param, bend in itertools.product([
((1, 3, 11, 5, 5), 1, 1, 0, True, True),
((1, 3, 11, 5, 5), 3, 1, 0, False, True),
((1, 3, 11, 5, 5), 1, 2, 1, False, False),
((1, 3, 11, 5, 5), 3, 2, 1, False, True),
((1, 3, 11, 5, 5), 1, 2, 0, False, True),
((1, 3, 6, 10, 10), 1, 1, 1, False, False),
((1, 3, 6, 10, 10), 3, 1, 0, False, False),
((1, 3, 6, 10, 10), 1, 2, 1, True, True),
((1, 3, 6, 10, 10), 3, 2, 0, True, False),
((1, 3, 6, 10, 10), 1, 1, 1, True, True),
], backends)
]
)
def test_avg_pool3d(
self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
):
if padding > kernel_size / 2:
return
if kernel_size == 1 and stride == 2 and padding == 0 and ceil_mode and \
(input_shape[-3] % 2 == 0 or input_shape[-2] % 2 == 0 or input_shape[-1] % 2 == 0):
pytest.xfail(reason="rdar://73894185 (CoreML sometimes returns 'nan's "
"for avg_pool when ceil_mode is True and kernel=1,stride=2,pad=0)")
if include_pad and ceil_mode and stride > 1:
# skip: MIL/CoreML does not support this configuration
# rdar://73723194
return
model = nn.AvgPool3d(
kernel_size,
stride,
padding,
ceil_mode=ceil_mode,
count_include_pad=include_pad,
)
self.run_compare_torch(input_shape, model, backend=backend)
class TestAdaptiveMaxPool(TorchBaseTest):
@pytest.mark.parametrize(
"output_size, magnification, delta, depth, backend",
itertools.product(
[(1,1), (3,2)],
[1, 2, 7],
[0, 11],
[1, 2, 3],
backends,
),
)
def test_adaptive_max_pool2d(
self, output_size, magnification, delta, depth, backend
):
# input_size = output_size * magnification + delta
input_size = (delta + magnification * output_size[0], delta + magnification * output_size[1])
# since coremltools reproduces PyTorch's kernel sizes and
# offsets for adaptive pooling layers only when input_size is
# a multiple of output_size, we expect failures otherwise
if not (input_size[0] % output_size[0] == 0 and input_size[1] % output_size[1] == 0):
pytest.xfail("Test should fail because input_size is not a multiple of output_size")
n = 1
in_shape = (n,depth) + input_size
model = nn.AdaptiveMaxPool2d(
output_size
)
self.run_compare_torch(in_shape, model, backend=backend)
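    # Worked example of the parametrization above (illustrative numbers only,
    # not part of the original file): output_size=(3, 2), magnification=2,
    # delta=11 gives input_size=(17, 15); since 17 % 3 != 0 and 15 % 2 != 0,
    # coremltools cannot reproduce PyTorch's kernel sizes/offsets and that
    # case is marked as an expected failure.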
class TestMaxPool(TorchBaseTest):
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, backend",
itertools.product(
[(1, 3, 15), (1, 1, 7)],
[1, 3],
[1, 2],
[0, 1],
[True, False],
backends,
),
)
def test_max_pool1d(
self, input_shape, kernel_size, stride, padding, ceil_mode, backend
):
if padding > kernel_size / 2:
return
if ceil_mode > 0 and padding == 0 and kernel_size == 1 and stride == 2:
if input_shape[-1] % 2 == 0:
# TODO: is this a valid case?
# in this case, torch adds "-inf" values at the border, post max pool operation
return
model = nn.MaxPool1d(
kernel_size,
stride,
padding,
dilation=1,
return_indices=False,
ceil_mode=ceil_mode,
)
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, backend",
itertools.product(
[(1, 3, 15, 15), (1, 1, 7, 7)],
[1, 3],
[1, 2],
[0, 1],
[True, False],
backends,
),
)
def test_max_pool2d(
self, input_shape, kernel_size, stride, padding, ceil_mode, backend
):
if padding > kernel_size / 2:
return
if ceil_mode > 0 and padding == 0 and kernel_size == 1 and stride == 2:
for r in range(2,4):
if input_shape[r] % 2 == 0:
# TODO: is this a valid case?
# in this case, torch adds "-inf" values at the border, post max pool operation
return
model = nn.MaxPool2d(
kernel_size,
stride,
padding,
dilation=1,
return_indices=False,
ceil_mode=ceil_mode,
)
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, backend",
itertools.product(
[(1, 3, 11, 3, 11), (1, 1, 7, 4, 7)],
[1, 3],
[1, 2],
[0, 1],
[True, False],
backends,
),
)
def test_max_pool3d(
self, input_shape, kernel_size, stride, padding, ceil_mode, backend
):
if padding > kernel_size / 2:
return
if ceil_mode > 0 and padding == 0 and kernel_size == 1 and stride == 2:
for r in range(2,5):
if input_shape[r] % 2 == 0:
# TODO: is this a valid case?
# in this case, torch adds "-inf" values at the border, post max pool operation
return
model = nn.MaxPool3d(
kernel_size,
stride,
padding,
dilation=1,
return_indices=False,
ceil_mode=ceil_mode,
)
self.run_compare_torch(input_shape, model, backend=backend)
class TestMaximumMinimum(TorchBaseTest):
@pytest.mark.parametrize(
"input_shape, mode, backend",
itertools.product(
[(2, 5, 7, 3), (3, 2, 9)],
["minimum", "maximum"],
backends,
),
)
def test_minimum_maximum(self, input_shape, mode, backend):
class TestModel(torch.nn.Module):
def forward(self, x, y):
if mode == "minimum":
return torch.minimum(x, y)
elif mode == "maximum":
return torch.maximum(x, y)
else:
raise ValueError("Unsupported mode: {mode}".format(mode=mode))
model = TestModel()
self.run_compare_torch([input_shape] * 2, model, backend=backend)
class TestPoolSymbolicInput(TorchBaseTest):
def test_max_pool(self):
model = nn.MaxPool2d(
kernel_size=1,
stride=2,
padding=0,
dilation=1,
ceil_mode=True,
)
input_shape = (1, 1, 11, 11)
converter_input_type = [TensorType(shape=(1, 1, RangeDim(), RangeDim()), dtype=np.float32)]
self.run_compare_torch(input_shape, model,
backend=backends[0],
converter_input_type=converter_input_type)
def test_avg_pool(self):
model = nn.AvgPool2d(
kernel_size=2,
stride=2,
padding=1,
count_include_pad=True,
ceil_mode=True,
)
input_shape = (1, 2, 15, 15)
converter_input_type = [TensorType(shape=(1, 2, RangeDim(), RangeDim()), dtype=np.float32)]
self.run_compare_torch(input_shape, model,
backend=backends[0],
converter_input_type=converter_input_type)
class TestLSTM(TorchBaseTest):
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
[ (*param, bend) for param, bend in itertools.product([
(1, 1, 1, True, True, 0.3, True),
(1, 1, 1, False, True, 0.3, False),
(1, 1, 1, False, True, 0.3, True),
(3, 1, 5, True, False, 0.3, False),
(3, 1, 5, True, True, 0.3, True),
(3, 7, 5, True, False, 0.3, False),
(3, 7, 5, False, True, 0.3, True),
(3, 7, 5, False, True, 0.3, False),
], backends)
],
)
def test_lstm(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend,
):
model = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=bidirectional,
)
SEQUENCE_LENGTH = 3
BATCH_SIZE = 2
model.eval()
num_directions = int(bidirectional) + 1
if batch_first:
_input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
else:
_input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
inputs = (_input, (h0, c0))
expected_results = model(*inputs)
self.run_compare_torch(
inputs, model, expected_results, input_as_shape=False, backend=backend,
)
class TestRNN(TorchBaseTest):
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, activation, backend",
[ (*param, bend) for param, bend in itertools.product([
(1, 1, 1, True, True, 0.3, "tanh"),
(1, 1, 1, False, True, 0.3, "relu"),
(1, 1, 1, False, True, 0.3, "tanh"),
(3, 1, 5, True, False, 0.3, "relu"),
(3, 1, 5, True, True, 0.3, "tanh"),
(3, 7, 5, True, False, 0.3, "relu"),
(3, 7, 5, False, True, 0.3, "relu"),
(3, 7, 5, False, True, 0.3, "tanh"),
], backends)
],
)
def test_rnn(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
activation,
backend,
):
SEQUENCE_LENGTH = 10
BATCH_SIZE = 3
model = nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=batch_first,
dropout=dropout,
nonlinearity=activation,
bidirectional=False, # bi-directional simple RNN not supported
)
model.eval()
num_directions = 1
if batch_first:
_input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
else:
_input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
inputs = (_input, h0)
expected_results = model(*inputs)
self.run_compare_torch(
inputs, model, expected_results, input_as_shape=False, backend=backend,
)
class TestGRU(TorchBaseTest):
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
[ (*param, bend) for param, bend in itertools.product([
(1, 1, 1, True, True, 0.3, True),
(1, 1, 1, False, True, 0.3, True),
(1, 1, 1, False, True, 0.3, False),
(3, 1, 5, True, False, 0.3, False),
(3, 1, 5, True, True, 0.3, True),
(3, 7, 5, True, False, 0.3, False),
(3, 7, 5, False, True, 0.3, False),
(3, 7, 5, False, True, 0.3, True),
], backends)
],
)
def test_gru(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend,
):
SEQUENCE_LENGTH = 10
BATCH_SIZE = 3
model = nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=bidirectional,
)
model.eval()
num_directions = int(bidirectional) + 1
if batch_first:
_input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
else:
_input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
inputs = (_input, h0)
expected_results = model(*inputs)
self.run_compare_torch(
inputs, model, expected_results, input_as_shape=False, backend=backend,
)
class TestLSTMWithPackedSequence(TorchBaseTest):
@pytest.mark.parametrize(
"pack_batch_first, pad_batch_first, LSTM_batch_first, pad_value, backend",
itertools.product(
[True, False], [True, False], [True, False], [-1,0], backends
),
)
def test_lstm(
self,
pack_batch_first,
pad_batch_first,
LSTM_batch_first,
pad_value,
backend,
):
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
input_size = 4
hidden_size = 6
num_layers = 1
bias = True
class Encoder(torch.nn.Module):
def __init__(self):
super().__init__()
self.lstm = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
batch_first=LSTM_batch_first,
bidirectional=False,
dropout=0.0,
)
def forward(self, batch_in, seq_lengths):
packed_input = pack_padded_sequence(batch_in, seq_lengths, batch_first=pack_batch_first)
output_packed, (hidden, _) = self.lstm(packed_input)
output, _ = pad_packed_sequence(output_packed, padding_value=pad_value, batch_first=pad_batch_first)
return output
SEQUENCE_LENGTH = 10
BATCH_SIZE = 3
model = Encoder()
model.eval()
if pack_batch_first:
_input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
else:
_input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
seq_lengths = torch.tensor([10, 5, 1], dtype=int)
inputs = (_input, seq_lengths)
expected_results = model(*inputs)
self.run_compare_torch(
inputs, model, expected_results, input_as_shape=False, backend=backend,
)
# Workaround for GitHub Issue #824
# i.e. the return h_n/c_n for a converted BLSTM are mangled.
# Therefore, just look at output 'y' (for now) which is correct.
class StripCellAndHidden(nn.Module):
def __init__(self,flagReturnTuple_):
super(StripCellAndHidden, self).__init__()
self.flagReturnTuple = flagReturnTuple_
def forward(self,x):
# Pass tuple, not tensor, to avoid issue in coremltools/converters/mil/frontend/torch/test/testing_utils.py on "if not expected_results:"
# Pass tensor when we need input for LSTM #2 as part of nn.Sequential()
return tuple(x[0]) if self.flagReturnTuple else x[0]
# Check GitHub Issue #810, assume num_layers == 2 and bidirectional == True
class TestStackedBLSTM(TorchBaseTest):
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
itertools.product([7], [5], [2], [True, False], [True, False], [0.3], [True], backends),
)
def test_lstm(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend,
):
model = nn.Sequential(
nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=True),
StripCellAndHidden(False),
nn.LSTM(
input_size=2*hidden_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=True),
StripCellAndHidden(True)
)
SEQUENCE_LENGTH = 3
BATCH_SIZE = 2
num_directions = int(bidirectional) + 1
# (seq_len, batch, input_size)
if batch_first:
_input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
else:
_input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
# Do not use h_0/c_0 input and do not check h_n/c_n output, GitHub Issue #824
expected_results = model(_input)
self.run_compare_torch(_input, model, expected_results,
input_as_shape=False, backend=backend)
class TestConcat(TorchBaseTest):
# This tests an edge case where the list of tensors to concatenate only
    # has one item. NN throws an error for this case, which is why we have to
# run through the full conversion process to test it.
@pytest.mark.parametrize("backend", backends)
def test_cat(self, backend):
class TestNet(nn.Module):
def __init__(self):
super(TestNet, self).__init__()
def forward(self, x):
x = torch.cat((x,), axis=1)
return x
model = TestNet()
self.run_compare_torch((1, 3, 16, 16), model, backend=backend)
class TestTypeAs(TorchBaseTest):
@pytest.mark.parametrize("backend, type",
itertools.product(
backends,
["int32", "float16", "float32", "bool"]
)
)
def test_type_as(self, backend, type):
class TestNet(nn.Module):
def __init__(self):
super(TestNet, self).__init__()
def forward(self, x, y):
return x.type_as(y)
model = TestNet()
type_map = {
"int32": torch.int32,
"float16": torch.float16,
"float32": torch.float32,
"bool": torch.bool,
}
input = [
torch.Tensor([0,1,2,3]).to(torch.float32),
torch.Tensor([2,3]).to(type_map[type]),
]
self.run_compare_torch(input, model, backend=backend, input_as_shape=False)
class TestReduction(TorchBaseTest):
@pytest.mark.parametrize(
"input_shape, dim, keepdim, mode, backend",
itertools.product([(2, 2), (1, 1)], [0, 1], [True, False], ["min", "max"], backends)
)
def test_min_max(self, input_shape, dim, keepdim, mode, backend):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
def forward(self, x):
if mode == "min":
return torch.min(x, dim=dim, keepdim=keepdim)
elif mode == "max":
return torch.max(x, dim=dim, keepdim=keepdim)
else:
raise ValueError("Unsupported mode: {mode}".format(mode=mode))
input_data = torch.rand(input_shape)
model = TestModel()
# rdar://62681982 (Determine the output names of MLModels)
expected_results = model(input_data)[::-1]
self.run_compare_torch(
input_data,
model,
expected_results=expected_results,
input_as_shape=False,
backend=backend,
)
class TestLayerNorm(TorchBaseTest):
@pytest.mark.parametrize(
"input_shape, eps, backend",
itertools.product([(1, 3, 15, 15), (1, 1, 1, 1)], [1e-5, 1e-7], backends),
)
def test_layer_norm(self, input_shape, eps, backend):
model = nn.LayerNorm(input_shape, eps=eps)
self.run_compare_torch(input_shape, model, backend=backend)
class TestPixelShuffle(TorchBaseTest):
@pytest.mark.parametrize(
"batch_size, CHW, r, backend",
itertools.product([1, 3], [(1, 4, 4), (3, 2, 3)], [2, 4], backends),
)
def test_pixel_shuffle(self, batch_size, CHW, r, backend):
C, H, W = CHW
input_shape = (batch_size, C * r * r, H, W)
model = nn.PixelShuffle(upscale_factor=r)
self.run_compare_torch(input_shape, model, backend=backend)
class TestExpand(TorchBaseTest):
@pytest.mark.parametrize(
"backend, shapes",
itertools.product(
backends,
[[(2, 1), (2, 2)], [(3, 1), (-1, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
),
)
def test_expand(self, backend, shapes):
input_shape, output_shape = shapes
class TestModel(torch.nn.Module):
def forward(self, x):
return x.expand(*output_shape)
model = TestModel()
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"backend, input_shapes",
itertools.product(
backends,
[[(2, 1), (2, 2)], [(3, 1), (3, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
),
)
def test_expand_as(self, backend, input_shapes):
class TestModel(torch.nn.Module):
def forward(self, x, y):
return x.expand_as(y)
model = TestModel()
self.run_compare_torch(input_shapes, model, backend=backend)
class TestExpandDims(TorchBaseTest):
@pytest.mark.parametrize(
"backend, rank_and_axis",
itertools.product(
backends,
[
(rank, axis)
for rank in range(1, 5)
for axis in range(-rank - 1, rank + 1)
],
),
)
def test_unsqueeze(self, backend, rank_and_axis):
rank, axis = rank_and_axis
input_shape = tuple(np.random.randint(low=2, high=10, size=rank))
model = ModuleWrapper(function=torch.unsqueeze, kwargs={"dim": axis})
self.run_compare_torch(input_shape, model, backend=backend)
class TestEinsum(TorchBaseTest):
@pytest.mark.parametrize(
"backend, equation, reverse_input_order",
itertools.product(
backends,
["abcd,adce->abce",
"abc,cbd->abd",
"bnqd,bnkd->bnqk",
"abc,cd->abd",
"abc,cde->abde",
"btnh,bfnh->bnft",
"bnft,btnh->bfnh",
"abcd,cde->abe"],
[False, True]
),
)
def test_einsum(self, backend, equation, reverse_input_order):
class TestEinsum(nn.Module):
def __init__(self):
super(TestEinsum, self).__init__()
def forward(self, x, y):
return torch.einsum(equation, x, y)
if equation == "abcd,adce->abce":
input_shapes = [[3, 4, 2, 6], [3, 6, 2, 2]]
elif equation == "abc,cbd->abd":
input_shapes = [[4, 2, 6], [6, 2, 2]]
elif equation == "bnqd,bnkd->bnqk":
input_shapes = [[1,2,3,4], [1,2,4,4]]
elif equation == "abc,cd->abd":
input_shapes = [[2,3,4], [4,5]]
elif equation == "abc,cde->abde":
input_shapes = [[2,3,4], [4,5,6]]
elif equation == "btnh,bfnh->bnft":
input_shapes = [[1,2,3,4], [1,5,3,4]]
elif equation == "bnft,btnh->bfnh":
input_shapes = [[1,2,3,4], [1,4,2,6]]
elif equation == "abcd,cde->abe":
input_shapes = [[1,2,3,4], [3,4,6]]
else:
raise ValueError("unrecognized equation")
if reverse_input_order:
input_output_strings = equation.split('->')
input_strings = input_output_strings[0].split(',')
equation = input_strings[1] + ',' + input_strings[0] + '->' + input_output_strings[1]
input_shapes = [input_shapes[1], input_shapes[0]]
model = TestEinsum()
self.run_compare_torch(input_shapes, model, backend=backend, input_as_shape=True)
class TestSqueeze(TorchBaseTest):
@pytest.mark.parametrize(
"backend, rank_and_axis",
itertools.product(
backends,
[(2, 1), (2, 0), (3, 1), (3, None), (4, None), (4, 2), (5, None), (5, -1),],
),
)
def test_squeeze(self, backend, rank_and_axis):
rank, axis = rank_and_axis
input_shape = list(np.random.randint(low=2, high=10, size=rank))
if axis is not None:
input_shape[axis] = 1
else:
input_shape[0] = 1
input_shape = tuple(input_shape)
        model = ModuleWrapper(
            function=torch.squeeze, kwargs={"dim": axis} if axis is not None else {}
        )
self.run_compare_torch(input_shape, model, backend=backend)
class TestCumSum(TorchBaseTest):
@pytest.mark.parametrize(
"backend, axis",
itertools.product(
backends,
[-1, 0, 1, 2, 3],
),
)
def test_cumsum(self, backend, axis):
input_shape = list(np.random.randint(low=2, high=10, size=4))
input_shape = tuple(input_shape)
model = ModuleWrapper(
function=torch.cumsum, kwargs={"dim": axis}
)
self.run_compare_torch(input_shape, model, backend=backend)
class TestReshape(TorchBaseTest):
@pytest.mark.parametrize(
"backend, output_shape",
itertools.product(backends, [(3, 2), (2, -1), (2, 1, 1, 3),],),
)
def test_reshape(self, backend, output_shape):
input_shape = (2, 3)
model = ModuleWrapper(function=torch.reshape, kwargs={"shape": output_shape})
self.run_compare_torch(input_shape, model, backend=backend)
class TestFlatten(TorchBaseTest):
@pytest.mark.parametrize(
"backend, start_dim",
itertools.product(backends, [2,-2],),
)
    def test_flatten(self, backend, start_dim):
input_shape = (2, 3, 4, 5)
model = ModuleWrapper(function=torch.flatten, kwargs={"start_dim": start_dim})
self.run_compare_torch(input_shape, model, backend=backend)
class TestGather(TorchBaseTest):
@pytest.mark.parametrize(
"rank_and_axis, backend",
itertools.product([(i, j) for i in range(1, 6) for j in range(0, i)], backends),
)
def test_gather_along_axis(self, rank_and_axis, backend):
rank, axis = rank_and_axis
params_shape = np.random.randint(low=2, high=5, size=rank)
indices_shape = np.copy(params_shape)
indices_shape[axis] = np.random.randint(low=1, high=8)
indices = np.random.randint(0, params_shape[axis], size=indices_shape)
params_shape, indices_shape = tuple(params_shape), tuple(indices_shape)
model = ModuleWrapper(
function=torch.gather,
kwargs={"dim": axis, "index": torch.from_numpy(indices)},
)
self.run_compare_torch([params_shape], model, backend=backend)
class TestActivation(TorchBaseTest):
@pytest.mark.parametrize(
"backend, shape",
itertools.product(backends, COMMON_SHAPES_ALL),
)
def test_relu(self, backend, shape):
model = nn.ReLU().eval()
self.run_compare_torch(
shape, model, backend=backend,
)
model = ModuleWrapper(nn.functional.relu_)
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(backends, COMMON_SHAPES_ALL),
)
def test_relu6(self, backend, shape):
model = nn.ReLU6().eval()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, alpha", itertools.product(backends, [0.1, 0.25, 2.0]),
)
def test_prelu(self, backend, alpha):
input_shape = (1, 5, 6, 7)
C = input_shape[1]
model = nn.PReLU(C, alpha).eval()
self.run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape, alpha",
itertools.product(backends,
COMMON_SHAPES_ALL,
[0.1, 2.0, 1.4]
)
)
def test_leaky_relu(self, backend, shape, alpha):
model = nn.LeakyReLU(negative_slope=alpha).eval()
self.run_compare_torch(
shape, model, backend=backend,
)
model = ModuleWrapper(nn.functional.leaky_relu_, {'negative_slope': alpha})
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(backends, COMMON_SHAPES_ALL),
)
def test_softmax(self, backend, shape):
model = nn.Softmax().eval()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, range_val",
itertools.product(
backends, [(-1.0, 1.0), (0.0, 0.1), (1.0, 3.0), (-1.0, 6.0)]
),
)
def test_hardtanh(self, backend, range_val):
input_shape = (1, 10, 4, 5)
model = nn.Hardtanh(range_val[0], range_val[1]).eval()
self.run_compare_torch(
input_shape, model, backend=backend,
)
model = ModuleWrapper(nn.functional.hardtanh_,
{'min_val': range_val[0], 'max_val': range_val[1]})
self.run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape, alpha",
itertools.product(backends,
COMMON_SHAPES_ALL,
[0.1, 2.0, 1.4]
)
)
def test_elu(self, backend, shape, alpha):
model = nn.ELU(alpha).eval()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(backends, COMMON_SHAPES_ALL)
)
def test_gelu(self, backend, shape):
model = nn.GELU().eval()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
@pytest.mark.parametrize(
"backend, shape",
itertools.product(backends, COMMON_SHAPES_ALL),
)
def test_erf(self, backend, shape):
class ERFActivation(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.erf(x)
model = ERFActivation().eval()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(backends,
[(1, 10), (1, 3, 5), (1, 5, 6, 7), (1, 3, 4, 5, 6)]
),
)
def test_sigmoid(self, backend, shape):
model = nn.Sigmoid().eval()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
@pytest.mark.parametrize(
"backend, shape",
itertools.product(backends, COMMON_SHAPES_ALL)
)
def test_sigmoid_hard(self, backend, shape):
model = nn.Hardsigmoid().eval()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, beta, threshold", itertools.product(backends, [1, 2, 5], [5, 10, 20]),
)
@pytest.mark.skipif(
_macos_version() <= (10, 15),
reason="Parametric SoftPlus segfaults on macOS 10.15 and below.",
)
def test_softplus(self, backend, beta, threshold):
input_shape = (1, 10, 5, 15)
model = nn.Softplus(beta, threshold).eval()
self.run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(backends, COMMON_SHAPES_ALL)
)
def test_softsign(self, backend, shape):
model = nn.Softsign().eval()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.skipif(
condition=version_lt(torch, "1.7.0"),
reason="torch.nn.SiLU available only in PyTorch 1.7.0+",
)
@pytest.mark.parametrize(
"shape, backend",
itertools.product([(1, 10), (1, 3, 4), (1, 4, 5, 6)], backends),
)
def test_silu(self, shape, backend):
model = ModuleWrapper(function=torch.nn.functional.silu)
self.run_compare_torch([shape], model, backend=backend)
@pytest.mark.parametrize(
"rounding_mode, backend",
itertools.product([None, "floor", "trunc"], backends),
)
def test_div(self, rounding_mode, backend):
model = ModuleWrapper(function=torch.div,
kwargs={"rounding_mode": rounding_mode})
x1 = torch.from_numpy(np.array([2.3, 2.6, -3.6, -3.2], dtype=np.float32))
x2 = torch.from_numpy(np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32))
out = torch.div(x1, x2, rounding_mode=rounding_mode)
self.run_compare_torch(
[x1, x2],
model,
backend=backend,
input_as_shape=False,
expected_results=out,
)
class TestElementWiseUnary(TorchBaseTest):
@pytest.mark.parametrize(
"backend, shape, op_string",
itertools.product(
backends,
[(1, 3, 5, 8)],
[
"abs",
"acos",
"asin",
"atan",
"ceil",
"cos",
"cosh",
"exp",
"floor",
"round",
"sin",
"sinh",
"sqrt",
"square",
"tan",
"tanh",
"sign",
],
),
)
def test_elementwise_no_params(self, backend, shape, op_string):
if not contains_op(torch, op_string):
return
op_func = getattr(torch, op_string)
model = ModuleWrapper(function=op_func)
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape, clamp_range",
itertools.product(
backends,
[(1, 3, 5, 8)],
[(0.0, 1.0), (-1.0, 0.5), (0.2, 0.7), (None, 4.0), (-3.0, None)],
),
)
def test_clamp(self, backend, shape, clamp_range):
params_dict = {}
if clamp_range[0] is not None:
params_dict["min"] = clamp_range[0]
if clamp_range[1] is not None:
params_dict["max"] = clamp_range[1]
model = ModuleWrapper(torch.clamp, params_dict)
self.run_compare_torch(
shape, model, backend=backend, rand_range=(-5, 5)
)
@pytest.mark.parametrize(
"backend, shape, threshold",
itertools.product(
backends,
[(1, 3, 5, 8)],
[(0.0, 0.0), (0.5, 0.5), (0.5, 10), (0.9, 0.0)]
),
)
def test_threshold(self, backend, shape, threshold):
model = torch.nn.Threshold(threshold[0], threshold[1]).eval()
self.run_compare_torch(
shape, model, backend=backend,
use_cpu_for_conversion=True, # TODO: change this to False (rdar://78343191)
)
@pytest.mark.parametrize(
"backend, shape, op_string",
itertools.product(
backends,
[(1, 3, 5, 8)],
[
"log",
"rsqrt",
"reciprocal",
],
),
)
def test_elementwise_numerically_stable(self, backend, shape, op_string):
op_func = getattr(torch, op_string)
model = ModuleWrapper(function=op_func)
self.run_compare_torch(
shape, model, backend=backend, rand_range=(20, 100)
)
class TestMatMul(TorchBaseTest):
@pytest.mark.parametrize("backend", backends)
def test_bmm(self, backend):
shape_x, shape_y = (3,4,5), (3,5,6)
model = ModuleWrapper(function=torch.bmm)
self.run_compare_torch(
[shape_x, shape_y], model, backend=backend,
)
class TestSplit(TorchBaseTest):
@pytest.mark.parametrize(
"backend, split_size_or_sections, dim",
itertools.product(backends, [1, 2, [1, 4]], [0, -2]),
)
def test_split(self, backend, split_size_or_sections, dim):
input_shape = (5, 2)
model = ModuleWrapper(function=torch.split,
kwargs={"split_size_or_sections": split_size_or_sections, "dim": dim})
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"backend, split_sizes, dim",
itertools.product(backends, [[1, 4], [3, 2]], [-1, -2]),
)
def test_split_with_sizes(self, backend, split_sizes, dim):
input_shape = (5, 5)
model = ModuleWrapper(function=torch.split_with_sizes,
kwargs={"split_sizes": split_sizes, "dim": dim})
self.run_compare_torch(input_shape, model, backend=backend)
class TestUnbind(TorchBaseTest):
@pytest.mark.parametrize(
"backend, dim",
itertools.product(backends,[0,1,2]),
)
def test_unbind(self, backend, dim):
input_shape = (3, 3, 4)
model = ModuleWrapper(function=torch.unbind,
kwargs={"dim": dim})
self.run_compare_torch(input_shape, model, backend=backend)
class TestTranspose(TorchBaseTest):
@pytest.mark.parametrize(
"backend, shape, dims",
itertools.product(backends, COMMON_SHAPES, [(0, 1), (-2, -1), (1, 0), (-1, -2)]),
)
def test(self, backend, shape, dims):
model = ModuleWrapper(function=torch.transpose,
kwargs={"dim0": dims[0], "dim1": dims[1]})
self.run_compare_torch(shape, model, backend=backend)
class TestTo(TorchBaseTest):
@pytest.mark.parametrize(
"use_cpu_for_conversion, backend", itertools.product([True, False], backends,)
)
def test_cast_bug(self, use_cpu_for_conversion, backend):
if backend[0] == "mlprogram" and not use_cpu_for_conversion:
pytest.xfail("rdar://78343191 ((MIL GPU) Core ML Tools Unit Test failures [failure to load or Seg fault])")
if backend[0] == "mlprogram" and use_cpu_for_conversion:
pytest.xfail("numerical mismatch : rdar://78952850")
class TestModel(torch.nn.Module):
def forward(self, spans, embedding):
spans = spans.float().relu().int()
max1, _ = torch.max(spans, dim=1, keepdim=False)
max1, _ = torch.max(max1, dim=1, keepdim=False)
max2, _ = torch.max(embedding, dim=1, keepdim=False)
max2, _ = torch.max(max2, dim=1, keepdim=False)
sigmoided_scores = max1 + max2
return sigmoided_scores
model = TestModel()
self.run_compare_torch([(1, 21, 2), (1, 6, 384)], model, backend=backend,
                               use_cpu_for_conversion=use_cpu_for_conversion)  # inputs: [spans.shape, embedding.shape]
class TestSlice(TorchBaseTest):
@pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
@pytest.mark.parametrize(
"backend", backends,
)
def test_dynamic_slice(self, backend):
class DynamicSlicer(torch.nn.Module):
def __init__(self):
super(DynamicSlicer, self).__init__()
def forward(self, x, context_length):
return x[context_length:, :, :]
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.tokens_embedding = torch.nn.Embedding(10, 10, 0)
self.context_embedding = torch.nn.Embedding(10, 10, 0)
self.dynamic_slicer = DynamicSlicer()
def forward(self, tokens, context, context_length):
                # Core ML requires inputs of rank 1 to 5, so we pass
                # context_length as a rank-1 tensor
tokens_embeddings = self.tokens_embedding(tokens)
context_embeddings = self.context_embedding(context)
embeddings = torch.cat((context_embeddings, tokens_embeddings), dim=0)
embeddings = self.dynamic_slicer(embeddings,
torch.squeeze(context_length))
return embeddings
model = Model()
batch_size = 5
inputs = [ TensorType(name="tokens", shape=(10, batch_size), dtype=np.int64),
TensorType(name="context", shape=(3, batch_size), dtype=np.int64),
TensorType(name="context_length", shape=(1,), dtype=np.int32),
]
self.run_compare_torch(inputs, model, rand_range=(0, 8),
backend=backend, use_scripting=False)
class TestRepeat(TorchBaseTest):
@pytest.mark.parametrize(
"backend, rank",
itertools.product(backends, list(range(1, 6))),
)
def test_repeat(self, backend, rank):
input_shape = np.random.randint(low=2, high=6, size=rank)
repeats = np.random.randint(low=2, high=4, size=rank)
input_shape = tuple(input_shape)
model = ModuleWrapper(function=lambda x: x.repeat(*repeats))
self.run_compare_torch(input_shape, model, backend=backend)
class TestStd(TorchBaseTest):
@pytest.mark.parametrize(
"backend, unbiased",
itertools.product(backends, [True, False]),
)
def test_std_2_inputs(self, backend, unbiased):
model = ModuleWrapper(function=torch.std,
kwargs={"unbiased": unbiased})
x = torch.randn(1, 5, 10) * 3
out = torch.std(x, unbiased=unbiased).unsqueeze(0)
self.run_compare_torch(x, model, expected_results=out,
input_as_shape=False, backend=backend)
@pytest.mark.parametrize(
"backend, unbiased, dim, keepdim",
itertools.product(backends, [True, False], [[0,2], [1], [2]], [True, False]),
)
def test_std_4_inputs(self, backend, unbiased, dim, keepdim):
model = ModuleWrapper(function=torch.std,
kwargs={"unbiased": unbiased, "dim" : dim, "keepdim": keepdim})
input_shape = (2, 5, 10)
self.run_compare_torch(input_shape, model, backend=backend)
class TestZeros(TorchBaseTest):
@pytest.mark.parametrize(
"backend, rank",
itertools.product(
backends,
[1, 3],
),
)
def test_zeros_like_static(self, backend, rank):
if backend[0] == 'mlprogram':
pytest.xfail("Not supported with ML Program backend")
class ZerosLikeStaticModel(nn.Module):
def __init__(self):
super(ZerosLikeStaticModel, self).__init__()
def forward(self, x):
return torch.zeros_like(x)
input_shape = np.random.randint(low=2, high=6, size=rank)
input_shape = tuple(input_shape)
model = ZerosLikeStaticModel()
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"backend, rank",
itertools.product(
backends,
[1, 3],
),
)
def test_zeros_like_dynamic(self, backend, rank):
if backend[0] == 'mlprogram':
pytest.xfail("Not supported with ML Program backend")
class ZerosLikeDynamicModel(nn.Module):
def __init__(self):
super(ZerosLikeDynamicModel, self).__init__()
def forward(self, x):
if rank == 1:
h = x[0]
x = torch.zeros(h)
elif rank == 3:
h, w, d = x[0], x[1], x[2]
x = torch.zeros(h, w, d)
return torch.zeros_like(x)
input_shape = np.random.randint(low=2, high=6, size=rank)
torch_in = torch.tensor(input_shape)
model = ZerosLikeDynamicModel()
torch_out = model(torch_in)
self.run_compare_torch(torch_in, model, expected_results=torch_out,
input_as_shape=False, backend=backend)
@pytest.mark.parametrize(
"backend, rank",
itertools.product(
backends,
[1, 3],
),
)
def test_zeros_static(self, backend, rank):
if backend[0] == 'mlprogram':
pytest.xfail("Not supported with ML Program backend")
class ZerosStaticModel(nn.Module):
def __init__(self):
super(ZerosStaticModel, self).__init__()
def forward(self, x):
if rank == 1:
return torch.zeros(1)
elif rank == 3:
return torch.zeros(2, 3, 5)
input_shape = np.random.randint(low=2, high=6, size=rank)
input_shape = tuple(input_shape)
model = ZerosStaticModel()
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"backend, rank",
itertools.product(
backends,
[1, 3],
),
)
def test_zeros_dynamic(self, backend, rank):
if backend[0] == 'mlprogram':
pytest.xfail("Not supported with ML Program backend")
class ZerosDynamicModel(nn.Module):
def __init__(self):
super(ZerosDynamicModel, self).__init__()
def forward(self, x):
if rank == 1:
h = x[0]
x = torch.zeros(h)
elif rank == 3:
h, w, d = x[0], x[1], x[2]
x = torch.zeros(h, w, d)
return x
input_shape = np.random.randint(low=2, high=6, size=rank)
torch_in = torch.tensor(input_shape)
model = ZerosDynamicModel()
torch_out = model(torch_in)
self.run_compare_torch(torch_in, model, expected_results=torch_out,
input_as_shape=False, backend=backend)
class TestTopk(TorchBaseTest):
@pytest.mark.parametrize(
"backend, largest, shape_dim_k",
itertools.product(
backends,
[True, False],
[
((4, 6, 7, 3), -1, 2),
((10, 3, 4), 2, 2),
((5,), 0, 2)
],
),
)
def test_topk(self, backend, largest, shape_dim_k):
input_shape = shape_dim_k[0]
dim = shape_dim_k[1]
k = shape_dim_k[2]
class TopkModel(nn.Module):
def __init__(self):
super(TopkModel, self).__init__()
def forward(self, x):
return torch.topk(x, k, dim=dim, largest=largest)
input_data = torch.rand(input_shape)
model = TopkModel()
expected_results = model(input_data)
expected_results = [expected_results.values, expected_results.indices]
self.run_compare_torch(
input_data,
model,
expected_results=expected_results,
input_as_shape=False,
backend=backend,
)
class TestLog10(TorchBaseTest):
@pytest.mark.parametrize(
"backend, rank", itertools.product(backends, range(1, 6)),
)
def test_log10(self, backend, rank):
class Log10Model(nn.Module):
def __init__(self):
super(Log10Model, self).__init__()
def forward(self, x):
return torch.log10(x)
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = Log10Model()
self.run_compare_torch(
input_shape, model, backend=backend,
)
class TestFlip(TorchBaseTest):
@pytest.mark.parametrize(
"backend, rank_dim",
itertools.product(
backends,
[
(1, [0]),
(2, [0, 1]),
(3, [1]),
(4, [0, 1, 2, 3])
]
),
)
def test_flip(self, backend, rank_dim):
rank, dim = rank_dim
class FlipModel(nn.Module):
def __init__(self):
super(FlipModel, self).__init__()
def forward(self, x):
return torch.flip(x, dim)
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = FlipModel()
self.run_compare_torch(
input_shape, model, backend=backend,
)
class TestWhere(TorchBaseTest):
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[(2, 6), (3, 4, 5)]
),
)
def test_where_test1(self, backend, shape):
class WhereModel(nn.Module):
def __init__(self):
super(WhereModel, self).__init__()
def forward(self, x, y):
return torch.where(x > 0.5, x, y)
input_shape = [shape, shape]
model = WhereModel()
self.run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[(2, 6), (3, 4, 5)]
),
)
def test_where_test2(self, backend, shape):
class WhereModel(nn.Module):
def __init__(self):
super(WhereModel, self).__init__()
def forward(self, cond, x, y):
return torch.where(cond, x, y)
cond = torch.rand(*shape) > 0.5
inputs = [cond, torch.rand(*shape), torch.rand(*shape)]
model = WhereModel()
expected_results = model(*inputs)
self.run_compare_torch(
inputs,
model,
backend=backend,
expected_results=expected_results,
input_as_shape=False,
)
class TestSelect(TorchBaseTest):
@pytest.mark.parametrize(
"backend, dim_index",
itertools.product(
backends,
[
[0, 0],
[1, 1],
[-1, -1],
]
),
)
def test_select(self, backend, dim_index):
dim, index = dim_index
class SelectModel(nn.Module):
def __init__(self):
super(SelectModel, self).__init__()
def forward(self, x):
return x.select(dim, index)
input_shape = (1,2,3)
model = SelectModel()
self.run_compare_torch(
input_shape, model, backend=backend,
)
class TestNonZero(TorchBaseTest):
@pytest.mark.parametrize(
"backend, rank",
itertools.product(
backends,
[1, 3],
),
)
def test_non_zero(self, backend, rank):
if rank == 1:
            input_shape = (10,)
zeros_indices = np.array([1, 4, 7, 9])
elif rank == 3:
input_shape = (2, 7, 3)
zeros_indices = np.array([1, 12, 33, 40])
input = np.arange(np.prod(input_shape)).astype(np.float32)
input[zeros_indices] = 0
input = np.reshape(input, input_shape)
input = torch.tensor(input)
model = ModuleWrapper(
torch.nonzero,
)
self.run_compare_torch(input, model,
input_as_shape=False, backend=backend)
class TestTensorAssign(TorchBaseTest):
@pytest.mark.parametrize(
"backend",
backends,
)
def test_tensor_assign_case_1(self, backend):
# single dimension assignment for a 1D tensor
class TensorAssignModel(torch.nn.Module):
def __init__(self):
super(TensorAssignModel, self).__init__()
def forward(self, x):
x[0] = 0
x[1] = 1
y = x + 1
x[1] = 2 * y[1]
return x, y
shape = (5,)
model = TensorAssignModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend",
backends,
)
def test_tensor_assign_case_2(self, backend):
# single dimension assignment for two 1D tensors
class TensorAssignModel(torch.nn.Module):
def __init__(self):
super(TensorAssignModel, self).__init__()
def forward(self, x, y):
x[0] = 0
y[1] = 2
y = x + y
x = 2 * y
y[3] = x[1] + 5
y[0] = x[0] * 10
z = x + y
return z, x, y
shape = (5,)
model = TensorAssignModel()
self.run_compare_torch(
[shape, shape], model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(5,4),
(5,4,3),
]
),
)
def test_tensor_assign_case_3(self, backend, shape):
# broadcast assignment for two n-D tensors
class TensorAssignModel(torch.nn.Module):
def __init__(self):
super(TensorAssignModel, self).__init__()
def forward(self, x, y):
x[0] = 0
x[3] = 1
y[2] = 2
return x
model = TensorAssignModel()
self.run_compare_torch(
[shape, shape], model, backend=backend,
)
@pytest.mark.parametrize(
"backend",
backends,
)
    def test_tensor_assign_case_4(self, backend):
# single dimension assignment for two n-D tensors
class TensorAssignModel(torch.nn.Module):
def __init__(self):
super(TensorAssignModel, self).__init__()
def forward(self, x, y):
x[0] = torch.tensor([1.,2.,3.,4.])
x[3] = 1
y[0] = x[0]
return x, y
shape = (5,4)
model = TensorAssignModel()
self.run_compare_torch(
[shape, shape], model, backend=backend,
)
@pytest.mark.parametrize(
"backend",
backends,
)
def test_tensor_assign_case_5(self, backend):
        # slice dimension assignment
class TensorAssignModel(torch.nn.Module):
def __init__(self):
super(TensorAssignModel, self).__init__()
def forward(self, x):
x[:,1] = torch.tensor([1., 2.])
return x
shape = (2,10)
model = TensorAssignModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend",
backends,
)
def test_tensor_assign_case_6(self, backend):
        # a more complicated slice dimension assignment
class TensorAssignModel(torch.nn.Module):
def __init__(self):
super(TensorAssignModel, self).__init__()
def forward(self, x):
x[:,1,:] = torch.tensor([1., 2., 3., 4., 5., 6.]).view(2,3)
return x
shape = (2,10,3)
model = TensorAssignModel()
self.run_compare_torch(
shape, model, backend=backend,
)
class TestIndexPut(TorchBaseTest):
@pytest.mark.parametrize(
"backend",
backends,
)
def test_index_put_case_1(self, backend):
class IndexPutModel(torch.nn.Module):
def __init__(self):
super(IndexPutModel, self).__init__()
def forward(self, x, y):
y = x + 1
mask = torch.tensor([True, False, False, False, True, True]).view(3,2)
x[mask] = y[mask]
return x
shape = (3,2)
model = IndexPutModel()
self.run_compare_torch(
[shape, shape], model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank",
itertools.product(
backends,
[0, 1],
),
)
def test_index_put_case_2(self, backend, rank):
class IndexPutModel(torch.nn.Module):
def __init__(self):
super(IndexPutModel, self).__init__()
def forward(self, x):
mask = torch.tensor([True, False, False, False, True, True]).view(3,2)
if rank == 0:
x[mask] = 0.
if rank == 1:
x[mask] = torch.tensor([1.])
return x
shape = (3,2)
model = IndexPutModel()
self.run_compare_torch(
shape, model, backend=backend,
)
class TestIndex(TorchBaseTest):
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(10,),
(3, 4, 5, 6),
]
),
)
def test_index_bool_index(self, backend, shape):
class IndexModel(torch.nn.Module):
def __init__(self):
super(IndexModel, self).__init__()
def forward(self, x):
return x[x > 0.5]
model = IndexModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(1, 2),
(3, 4, 5, 6),
]
),
)
def test_index_int_index_case_1(self, backend, shape):
# all elements are selected
class IndexModel(torch.nn.Module):
def __init__(self):
super(IndexModel, self).__init__()
def forward(self, x):
if len(shape) == 2:
return x[:, :]
elif len(shape) == 4:
return x[:]
model = IndexModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(1, 2),
(3, 4, 5, 6),
]
),
)
def test_index_int_index_case_2(self, backend, shape):
# only one axis is sliced
class IndexModel(torch.nn.Module):
def __init__(self):
super(IndexModel, self).__init__()
def forward(self, x):
if len(shape) == 2:
index = torch.tensor([0])
return x[index, :]
elif len(shape) == 4:
index = torch.tensor([1, 2])
return x[:, :, index]
model = IndexModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(1, 2, 3),
(2, 3, 4, 5),
]
),
)
def test_index_int_index_case_3(self, backend, shape):
# only two axes are sliced, and connected
class IndexModel(torch.nn.Module):
def __init__(self):
super(IndexModel, self).__init__()
def forward(self, x):
if len(shape) == 3:
index_1 = torch.tensor([0])
index_2 = torch.tensor([1])
return x[index_1, index_2, :]
elif len(shape) == 4:
index_1 = torch.tensor([0, 1, 1])
index_2 = torch.tensor([2, 1, 0])
return x[:, index_1, index_2, :]
model = IndexModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(1, 2, 3),
(2, 3, 4, 5),
]
),
)
def test_index_int_index_case_4(self, backend, shape):
# only two axes are sliced, and not connected
class IndexModel(torch.nn.Module):
def __init__(self):
super(IndexModel, self).__init__()
def forward(self, x):
if len(shape) == 3:
index_1 = torch.tensor([0])
index_2 = torch.tensor([1])
                    return x[index_1, :, index_2]
elif len(shape) == 4:
index_1 = torch.tensor([0, 1, 1])
index_2 = torch.tensor([3, 3, 4])
return x[index_1, :, :, index_2]
model = IndexModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(1, 2, 3),
(2, 3, 4, 5),
]
),
)
def test_index_int_index_case_5(self, backend, shape):
# all axes are sliced
class IndexModel(torch.nn.Module):
def __init__(self):
super(IndexModel, self).__init__()
def forward(self, x):
if len(shape) == 3:
index_1 = torch.tensor([0])
index_2 = torch.tensor([1])
index_3 = torch.tensor([2])
return x[index_1, index_2, index_3]
elif len(shape) == 4:
index_1 = torch.tensor([0, 1, 1, 0, 0])
index_2 = torch.tensor([1, 2, 0, 0, 0])
index_3 = torch.tensor([0, 1, 2, 3, 3])
index_4 = torch.tensor([2, 1, 0, 4, 4])
return x[index_1, index_2, index_3, index_4]
model = IndexModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(1, 2),
(3, 4, 5, 6),
]
),
)
def test_index_int_index_case_6(self, backend, shape):
# only one axis is sliced + nd mode
class IndexModel(torch.nn.Module):
def __init__(self):
super(IndexModel, self).__init__()
def forward(self, x):
if len(shape) == 2:
index = torch.tensor([0,0,0,0,0,0])
index = index.view(2, 3)
return x[index, :]
elif len(shape) == 4:
index = torch.tensor([0,1,2,3,0,1])
index = index.view(3, 2)
return x[:, index]
model = IndexModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(1, 2, 3),
(2, 3, 4, 5),
]
),
)
def test_index_int_index_case_7(self, backend, shape):
# two axes are sliced, and connected + nd mode
class IndexModel(torch.nn.Module):
def __init__(self):
super(IndexModel, self).__init__()
def forward(self, x):
if len(shape) == 3:
index_1 = torch.tensor([0,0,0,0,0,0,0,0]).view(4,2)
index_2 = torch.tensor([1,0,0,0,1,1,1,1]).view(4,2)
return x[index_1, index_2, :]
elif len(shape) == 4:
index_1 = torch.tensor([0,0,2,2,1,1,2,0]).view(2,4)
index_2 = torch.tensor([0,1,2,3,0,1,2,3]).view(2,4)
return x[:, index_1, index_2, :]
model = IndexModel()
self.run_compare_torch(
shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, shape",
itertools.product(
backends,
[
(1, 2, 3),
(2, 3, 4, 5),
]
),
)
def test_index_int_index_case_8(self, backend, shape):
# two axes are sliced, and not connected + nd mode
class IndexModel(torch.nn.Module):
def __init__(self):
super(IndexModel, self).__init__()
def forward(self, x):
if len(shape) == 3:
index_1 = torch.tensor([0,0,0,0,0,0,0,0]).view(2,4)
index_2 = torch.tensor([1,0,0,2,2,1,1,1]).view(2,4)
                    return x[index_1, :, index_2]
elif len(shape) == 4:
index_1 = torch.tensor([0,1,1,1,1,1,0,0]).view(4,2)
index_2 = torch.tensor([0,1,2,3,4,0,1,2]).view(4,2)
return x[index_1, :, :, index_2]
model = IndexModel()
self.run_compare_torch(
shape, model, backend=backend,
)
class TestPad(TorchBaseTest):
@pytest.mark.parametrize(
"backend, rank, mode",
itertools.product(backends, range(3, 5), ['reflect', 'replicate'])
)
def test_pad_reflect_replicate(self, backend, rank: int, mode: str):
if rank == 3:
pad_len = 2
input_shape = (5, 10, 10)
elif rank == 4:
pad_len = 4
input_shape = (10, 5, 5, 10)
else:
raise NotImplementedError("Only 3D, 4D padding with non-constant padding are supported for now")
max_pad = min(input_shape[-1], input_shape[-2])
pad = list(np.random.randint(low=0, high=max_pad,
size=pad_len))
model = ModuleWrapper(function=torch.nn.functional.pad,
kwargs={"pad": pad, "mode": mode})
self.run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank",
itertools.product(backends, range(1, 6))
)
def test_pad_constant(self, backend, rank: int):
if rank > 5:
raise NotImplementedError("Only supports < 6D constant padding")
val = float(np.random.random(1))
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
pad_dims = np.random.randint(low=1, high=rank+1)
pad = list(np.random.randint(low=0, high=10,
size=pad_dims*2))
model = ModuleWrapper(function=torch.nn.functional.pad,
kwargs={"pad": pad, "mode": "constant", "value": val})
self.run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize("backend", backends)
def test_constant_pad_1d(self, backend):
input_shape = (3, 4, 5)
model = torch.nn.ConstantPad1d((5, 6), 3.5).eval()
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize("backend", backends)
def test_constant_pad_2d(self, backend):
input_shape = (3, 4, 5, 6)
model = torch.nn.ConstantPad2d((5, 6, 3, 8), 3.5).eval()
self.run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize("backend", backends)
def test_constant_pad_3d(self, backend):
input_shape = (3, 4, 5, 6, 2)
model = torch.nn.ConstantPad3d((5, 6, 3, 8, 2, 4), 3.5).eval()
self.run_compare_torch(input_shape, model, backend=backend)
class TestMeshgrid(TorchBaseTest):
@pytest.mark.parametrize(
"rows, cols, dtype, inp_mode, backend",
itertools.product(
[1, 2, 3], [1, 2, 3], [torch.int, torch.float], ["norm", "list"], backends
),
)
def test_meshgrid(
self,
rows,
cols,
dtype,
inp_mode,
backend,
):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
def forward(self, rows, cols):
if inp_mode == "norm":
return torch.meshgrid(rows, cols)
elif inp_mode == "list":
return torch.meshgrid([rows, cols])
else:
raise ValueError("Unsupported mode: {mode}".format(mode=inp_mode))
inputs = (
torch.arange(start=0, end=rows, step=1, dtype=dtype),
torch.arange(start=0, end=cols, step=1, dtype=dtype)
)
model = TestModel().eval()
expected_results = model(*inputs)
self.run_compare_torch(
inputs, model, expected_results, input_as_shape=False, backend=backend,
)
|
[
"noreply@github.com"
] |
lexmz.noreply@github.com
|
81119701667201237eeb496143a3ed5bdf2a5c9d
|
3f6ef41c31d7a1248645ea742c349d3e8fecf2cb
|
/encryption_server.py
|
ed86f7191b10c92aa49ec1158028fb02d16cfe83
|
[] |
no_license
|
omersmoro/teacher_student_system
|
b94ec5b663a9141fa2731391631ec9306d7fd11d
|
1831809656f8f55026df79048dc8de11446537c0
|
refs/heads/master
| 2021-01-11T16:13:42.411237
| 2017-06-17T13:32:43
| 2017-06-17T13:32:43
| 80,042,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
import socket
from Crypto import Random
def main():
    """
    Accept a single TCP client and decrypt the AES-CBC ciphertext it sends.
    """
    my_socket = socket.socket()
    my_socket.bind(("127.0.0.1", 80))
    my_socket.listen(1)
    client, address = my_socket.accept()
    # recv() reads the client's data; accept() only returns new connections
    ciphertext = client.recv(1024)
    obj2 = AES.new('This is a key123', AES.MODE_CBC, 'This is an IV456')
    plaintext = obj2.decrypt(ciphertext)
    print(plaintext)
def make_keys(msg):
    """
    Generate a 1024-bit RSA key pair and encrypt msg with the public key.
    """
    random_generator = Random.new().read
    key = RSA.generate(1024, random_generator)
    publickey = key.publickey()
    encrypted = publickey.encrypt(msg, 32)
    return key, encrypted
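# A minimal sketch (not part of the original file) of a matching client,
# assuming the server's hard-coded AES key/IV above; AES-CBC input must be
# a multiple of 16 bytes, so the message is padded with ljust().
def example_client(msg='hello world!'.ljust(16)):
    cipher = AES.new('This is a key123', AES.MODE_CBC, 'This is an IV456')
    sock = socket.socket()
    sock.connect(("127.0.0.1", 80))
    sock.sendall(cipher.encrypt(msg))
    sock.close()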
if __name__ == '__main__':
main()
|
[
"omer.smorodinsky@gmail.com"
] |
omer.smorodinsky@gmail.com
|
ded85cad6076ca226a4a37014efaff83d4a0dae9
|
0dc8439d58bba23606e98c839594236679e959e3
|
/core/models.py
|
6df7195c14bd126ac768b657651d2b45fc601df5
|
[] |
no_license
|
araftery/cks-tour-management
|
29684edd221d6ba450199b8ed0c32abc41b7a52f
|
a8eeaf41ec87d6d45c60a8b7fcb64dcc3b64f441
|
refs/heads/master
| 2021-01-10T13:54:44.487490
| 2017-01-26T00:38:45
| 2017-01-26T00:38:45
| 48,933,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext as _
from core.setting_validators import setting_validators
class Setting(models.Model):
name = models.CharField(max_length=500)
value = models.CharField(max_length=500)
description = models.CharField(max_length=1000, null=True, blank=True)
time_set = models.DateTimeField()
order_num = models.IntegerField()
value_type_choices = ['int', 'float', 'string', 'bool', 'email', 'semester_or_never']
value_type_choice_tuples = [(i, i) for i in value_type_choices]
value_type = models.CharField(choices=value_type_choice_tuples, max_length=50)
def __unicode__(self):
return u'{}: {}'.format(self.name, self.value)
def save(self, *args, **kwargs):
        # avoid circular import
from core.utils import now
now_obj = now()
self.time_set = now_obj
if not self.pk:
# object is new, just save it
return super(Setting, self).save(*args, **kwargs)
else:
# don't save it, create a new object instead
Setting.objects.create(name=self.name, value=self.value, description=self.description, order_num=self.order_num, value_type=self.value_type, time_set=now_obj)
def clean(self):
value = self.value
try:
validation = setting_validators[self.value_type](value)
if validation['valid'] is True:
value = validation['value']
else:
errors = []
for error in validation['errors']:
errors.append(ValidationError(_(error), code='invalid'))
raise ValidationError({'value': errors})
except (IndexError, KeyError):
raise ValidationError({'value': _('Invalid value.')}, code='invalid')
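# A minimal usage sketch (hypothetical values) of the append-only save()
# above: saving an existing Setting never updates its row; it inserts a
# fresh row with a new time_set, so the full history is kept.
#
#   s = Setting.objects.create(name='max_tours', value='5',
#                              order_num=1, value_type='int')
#   s.value = '6'
#   s.save()  # inserts a second 'max_tours' row; the old one remains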
|
[
"andrewraftery@gmail.com"
] |
andrewraftery@gmail.com
|
eb2aec7c6ddd7357232c9b1d439ea356b93350c5
|
a6069dd776bfff49cbb9cfcdae7c9d99622c4162
|
/setup.py
|
892747253b99db30ba13294c955656d889733735
|
[] |
no_license
|
stigi99/gra
|
1ffac5a1b9f5290c2df3d61c0741a88ff6c814ca
|
9b6fc73ea7725fd0452e9f871ad3a42a93014dd0
|
refs/heads/master
| 2020-08-29T02:23:23.925942
| 2019-12-18T17:17:17
| 2019-12-18T17:17:17
| 217,892,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
import cx_Freeze
executables = [cx_Freeze.Executable("gra.py")]
cx_Freeze.setup(
name="moja gra",
options={"build_exe": {"packages":["pygame"],
"include_files":["logo.png","paletka.png","tlo.jpeg"]}},
executables = executables
)
|
[
"misiakmateusz@yahoo.com"
] |
misiakmateusz@yahoo.com
|
d9051a06f4e2241c2b6a4335c2f17eb32a9bbbd7
|
786232b3c9eac87728cbf2b5c5636d7b6f10f807
|
/Leetcode/medium/39.py
|
c2f2d1b0da2e98ae021611c0136260e1c8c51c24
|
[] |
no_license
|
luoyanhan/Algorithm-and-data-structure
|
c9ada2e123fae33826975665be37ca625940ddd4
|
fb42c3a193f58360f6b6f3b7d5d755cd6e80ad5b
|
refs/heads/master
| 2021-12-22T15:45:28.260386
| 2021-12-02T03:08:35
| 2021-12-02T03:08:35
| 251,007,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
class Solution:
def combinationSum(self, candidates, target: int):
candidates.sort()
length = len(candidates)
path = list()
res = list()
def dfs(start, resident):
if resident == 0:
res.append(path[:])
return
for i in range(start, length):
new_resident = resident - candidates[i]
if new_resident < 0:
break
path.append(candidates[i])
dfs(i, new_resident)
path.pop()
dfs(0, target)
return res
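if __name__ == "__main__":
    # Example (the classic LeetCode 39 case): candidates may be reused,
    # so target 7 over [2, 3, 6, 7] yields [[2, 2, 3], [7]].
    print(Solution().combinationSum([2, 3, 6, 7], 7))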
|
[
"luoyanhan@alphaleader.com"
] |
luoyanhan@alphaleader.com
|
d00f9aa2b4866100b3c927b5fc41e5a741797343
|
70280955a5382d73e58395eba78c119a400f4ce7
|
/abc/135/1.py
|
7cf4a432a94666e9eaad356efba90fe8c4657220
|
[] |
no_license
|
cohock13/atcoder
|
a7d0e26a10a4e58690347a2e36839c2f503a79ba
|
d268aa68fc96203eab94d021bd158cf84bdb00bc
|
refs/heads/master
| 2021-01-03T00:41:31.055553
| 2020-10-27T12:28:06
| 2020-10-27T12:28:06
| 239,839,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
def main():
A,B = map(int,input().split())
if ((A+B)/2)%1 != 0:
print("IMPOSSIBLE")
else:
print(int((A+B)/2))
if __name__=="__main__":
main()
|
[
"callout2690.gmail.com"
] |
callout2690.gmail.com
|
069545c800034f388616e6c0cf982eca7b46a0b1
|
fddaeea72a9e437dae53ecd4fc061aea9ce16e30
|
/django-notes/django_project/django_learning/django_learning/settings.py
|
644f7dc4d633595574bb2d738f22c9db4f7c3f36
|
[] |
no_license
|
coderliuhao/DataScienceBeginner
|
a36d950ba1465892081cfe39a6d4d4fcfb57829a
|
ed3e329ef8b2d43bed12ddead109f74375b925b3
|
refs/heads/main
| 2023-07-08T07:19:59.567318
| 2021-08-02T08:05:20
| 2021-08-02T08:05:20
| 312,614,915
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,603
|
py
|
"""
Django settings for django_learning project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y@fdhbg06@tn&*ra8iriey5*0f5rhy#xo*efe4wz0@e3*+67hb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'second_day'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_learning.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_learning.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
#DATABASES = {
#    'default': {
#        'ENGINE': 'django.db.backends.sqlite3',
#        'NAME': BASE_DIR / 'db.sqlite3',
#    }
#}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mysql',
'HOST':'localhost',
'PORT':'3306',
'USER':'root',
'PASSWORD':'liuhao123',
'OPTIONS':{
'init_command':"SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
}
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
#LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh-hans'
#TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=(
os.path.join(BASE_DIR,"static"),
)
|
[
"noreply@github.com"
] |
coderliuhao.noreply@github.com
|
8556e14514798eb91ab62f518913a6d58c87ad87
|
2ec6e3f3dc5af9ebc57662a0506a2c4d9ab31b7b
|
/Task05.py
|
f054b61ce03c54904525f3694818eaea6586f3b5
|
[] |
no_license
|
ivanlykhosherst/Pythonrapidtest
|
0c90a18989bf87fa724f78950d9c7fb3e83f88fd
|
7d62be2eaba163ae6317e42853575b5c493ad887
|
refs/heads/main
| 2023-07-04T10:17:58.856827
| 2021-08-04T19:38:12
| 2021-08-04T19:38:12
| 392,603,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
# Print the first n rows of Pascal's triangle.
def printPascal(n):
for line in range(1, n + 1):
C = 1 # used to represent C(line, i)
for i in range(1, line + 1):
# The first value in a
# line is always 1
print(C, end=" ")
C = int(C * (line - i) / i)
print("")
n = 15
printPascal(n)
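# A quick sanity check of the recurrence used above (math.comb needs
# Python 3.8+): row `line` of the triangle equals C(line-1, k) for
# k = 0..line-1, e.g. row 5 is 1 4 6 4 1.
import math
assert [math.comb(4, k) for k in range(5)] == [1, 4, 6, 4, 1]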
|
[
"Lykhosherst@gmail.com"
] |
Lykhosherst@gmail.com
|
824ea316894e03128bf365706456b1826ee2e214
|
c4b8e1e09dedbccd37ca008ecaaca4438610bbaf
|
/google_or_tools/traffic_lights_sat.py
|
b70b7800f00655fd11207106aa4b0dfc3f871cd6
|
[
"MIT"
] |
permissive
|
hakank/hakank
|
4806598b98cb36dd51b24b0ab688f52dadfe9626
|
c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2
|
refs/heads/master
| 2023-08-15T00:21:52.750270
| 2023-07-27T16:21:40
| 2023-07-27T16:21:40
| 11,933,517
| 336
| 97
|
MIT
| 2023-07-27T11:19:42
| 2013-08-06T20:12:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,113
|
py
|
# Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Traffic lights problem in OR-tools CP-SAT Solver.
CSPLib problem 16
http://www.cs.st-andrews.ac.uk/~ianm/CSPLib/prob/prob016/index.html
'''
Specification:
Consider a four way traffic junction with eight traffic lights. Four of the
traffic
lights are for the vehicles and can be represented by the variables V1 to V4
with domains
{r,ry,g,y} (for red, red-yellow, green and yellow). The other four traffic
lights are
for the pedestrians and can be represented by the variables P1 to P4 with
domains {r,g}.
The constraints on these variables can be modelled by quaternary constraints
on
(Vi, Pi, Vj, Pj ) for 1<=i<=4, j=(1+i)mod 4 which allow just the tuples
{(r,r,g,g), (ry,r,y,r), (g,g,r,r), (y,r,ry,r)}.
It would be interesting to consider other types of junction (e.g. five roads
intersecting) as well as modelling the evolution over time of the traffic
light sequence.
...
Results
Only 2^2 out of the 2^12 possible assignments are solutions.
(V1,P1,V2,P2,V3,P3,V4,P4) =
{(r,r,g,g,r,r,g,g), (ry,r,y,r,ry,r,y,r), (g,g,r,r,g,g,r,r),
(y,r,ry,r,y,r,ry,r)}
[(1,1,3,3,1,1,3,3), ( 2,1,4,1, 2,1,4,1), (3,3,1,1,3,3,1,1), (4,1, 2,1,4,1,
2,1)}
The problem has relative few constraints, but each is very tight. Local
propagation
appears to be rather ineffective on this problem.
'''
Note: In this model we use only the constraint solver.AllowedAssignments().
This is a port of my old CP model traffic_lights.py
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
class SolutionPrinter(cp.CpSolverSolutionCallback):
"""SolutionPrinter"""
def __init__(self, n, lights, V, P):
cp.CpSolverSolutionCallback.__init__(self)
self.__n = n
self.__lights = lights
self.__V = V
self.__P = P
self.__solution_count = 0
def OnSolutionCallback(self):
self.__solution_count += 1
for i in range(self.__n):
print("%+2s %+2s" %
(self.__lights[self.Value(self.__V[i])],
self.__lights[self.Value(self.__P[i])]), end=" ")
print()
def SolutionCount(self):
return self.__solution_count
def main():
model = cp.CpModel()
#
# data
#
n = 4
r, ry, g, y = list(range(n))
lights = ["r", "ry", "g", "y"]
# The allowed combinations
allowed = []
allowed.extend([(r, r, g, g), (ry, r, y, r), (g, g, r, r), (y, r, ry, r)])
#
# declare variables
#
V = [model.NewIntVar(0, n - 1, "V[%i]" % i) for i in range(n)]
P = [model.NewIntVar(0, n - 1, "P[%i]" % i) for i in range(n)]
#
# constraints
#
for i in range(n):
for j in range(n):
if j == (1 + i) % n:
model.AddAllowedAssignments((V[i], P[i], V[j], P[j]), allowed)
#
# Search and result
#
solver = cp.CpSolver()
solution_printer = SolutionPrinter(n, lights, V, P)
status = solver.SearchForAllSolutions(model, solution_printer)
if status != cp.OPTIMAL:
print("No solution!")
print()
print("NumSolutions:", solution_printer.SolutionCount())
print("NumConflicts:", solver.NumConflicts())
print("NumBranches:", solver.NumBranches())
print("WallTime:", solver.WallTime())
print()
if __name__ == "__main__":
main()
|
[
"hakank@gmail.com"
] |
hakank@gmail.com
|
71de948677a2d66466ca4b291df6c8e5dd7d63ee
|
458a57c889a14a364159dd1ac7831a57eee2111b
|
/utils.py
|
c3f568bdf2253a38f58433b3679ee2bbf8ef99b5
|
[] |
no_license
|
Lihsayuri/Projeto1a-TecWeb
|
d6abdd68e2557a34b39bd85a86fdf7330ce29596
|
aa0c6282e45b2e02a714a0e18592d965cc9ce856
|
refs/heads/main
| 2023-08-10T00:33:57.715504
| 2021-09-12T17:13:36
| 2021-09-12T17:13:36
| 405,472,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,489
|
py
|
import json
from os import path
from database import *
def extract_route(requisicao):
    # example: GET /img/logo-getit.png HTTP/1.1
    # first split the request line on "GET /"
    if requisicao.startswith("GET /"):
        lista1 = requisicao.split("GET /")
        # after that split, term 0 is the part before "GET /" and term 1 is the rest
        # split once more, this time on the space
        # after this split, term 0 is img/logo-getit.png, which is what we want!
    elif requisicao.startswith("POST /"):
        lista1 = requisicao.split("POST /")
    lista2 = lista1[1].split(" ")
    return lista2[0]
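# Example: extract_route("GET /img/logo-getit.png HTTP/1.1") returns
# "img/logo-getit.png" (the route without the leading slash).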
def read_file(filepath):
print(filepath)
string = str(filepath)
extensao = string.split(".")
tipo = extensao[1]
print(tipo)
# if tipo == "txt" or tipo == "html" or tipo == "css" or tipo =="js":
# with open(filepath, "rt") as text:
# lido = text.read()
# return lido
# else:
with open(filepath, "rb") as file:
lido = file.read()
return lido
# Implement the load_data function, which receives the name of a JSON file and returns the file's
# contents loaded as a Python object (the function should assume the JSON file lives inside the
# data folder). For example: if the contents of data/dados.json are the string {"chave": "valor"},
# your function must return the Python dict {"chave": "valor"} for the input dados.json (note that
# the folder name is not passed as an argument). Hint: Python already has a function for this
# (and you saw it in Software Design).
def load_data():
db = Database('banco')
notes = db.get_all()
return notes
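# A minimal sketch of the JSON-file variant the comment above describes
# (hypothetical helper name; this project switched to the Database class):
# def load_json_data(filename):
#     with open('data/' + filename, encoding='utf-8') as f:
#         return json.load(f)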
# Implement the load_template function, which receives the name of a template file and returns a string with that file's contents.
# The file name does not include the templates folder. For example: for the input index.html you must load the contents of templates/index.html.
def load_template(fileName):
file = open('templates/' + fileName, encoding="UTF-8")
conteudo = file.read()
file.close()
return conteudo
# Still in the index(request) function of views.py, add the new note (which will be stored in params['titulo'] and params['detalhes']) to the notes.json file.
# Hint: create a function in utils.py that receives the new note and appends it to the list in notes.json.
def adicionar(params):
    db = Database('banco')
    db.add(Note(title=params['titulo'], content=params['detalhes']))
def deletar(id):
    db = Database('banco')
    db.delete(id)
def editar(id, params):
    db = Database('banco')
    db.update(Note(id=id, title=params['titulo'], content=params['detalhes']))
# Implement the build_response function in utils.py. It must accept the following arguments: build_response(body='', code=200, reason='OK', headers='')
# (you may want to read this: https://docs.python.org/3/tutorial/controlflow.html#default-argument-values).
# Remember to test your function with python test_utils.py.
def build_response(body='', code=200, reason='OK', headers=''):
if len(headers) != 0:
convertido = ("HTTP/1.1 " + str(code)+ " " + reason + '\n' + headers + '\n\n' + body).encode()
else:
convertido = ("HTTP/1.1 " + str(code)+ " " + reason + '\n\n' + body).encode()
return convertido
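# Example: build_response('hello') returns b'HTTP/1.1 200 OK\n\nhello';
# a non-empty headers string is placed between the status line and the body.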
|
[
"liviasm1@al.insper.edu.br"
] |
liviasm1@al.insper.edu.br
|
8eb7d50adbae676f772f6827f7c84d8bfecc0ec0
|
16e362038db128d69f82dabea09de00b1cc7d416
|
/core/admin.py
|
89ed347c98eea68a1667d3638066ec6e4bbdaab7
|
[] |
no_license
|
rcdnb/loremipsumbackend
|
e838f533a081e32e115b09a85a508c27787cf584
|
57bd4a32d79c4b93a3a381f6b7a95c10df135203
|
refs/heads/master
| 2023-02-24T23:45:43.585516
| 2021-01-24T19:46:58
| 2021-01-24T19:46:58
| 332,110,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
from django.contrib import admin
from .models import Projeto
admin.site.register(Projeto)
# Register your models here.
|
[
"ruan.cnbarros@gmail.com"
] |
ruan.cnbarros@gmail.com
|
e6a9b27d4fc8a81bc9f2d8de2676004c4931ef9c
|
a4f8dee27d379764aee05780ebae455bce8d7340
|
/transaction/amount/migrations/0005_remove_spent_name.py
|
e70a66b6614358674050437492e15886fd052ecc
|
[] |
no_license
|
Mayur26690/Transactions
|
c955188acf915ba3ae798539a669f8e662aba160
|
8bcc0e3a73b1ab2a8fbfa05c61732196b7637617
|
refs/heads/master
| 2021-01-01T04:23:30.998066
| 2017-07-19T04:13:10
| 2017-07-19T04:13:10
| 97,168,724
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-18 23:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('amount', '0004_auto_20170713_2317'),
]
operations = [
migrations.RemoveField(
model_name='spent',
name='name',
),
]
|
[
"Mayur@Niravs-MacBook-Pro.local"
] |
Mayur@Niravs-MacBook-Pro.local
|
73bc27a2b6f068eb640e1c8b4dd5a99a733b4a9b
|
70f2fde100207b9eab8833facf8f6e291495bc5c
|
/src/mdns_browser/agents/comms.py
|
6ae7ee35f48f46db9bbfaba5e01f14bd7c44fb5e
|
[] |
no_license
|
jldupont/mdns-browser
|
5ca6b6414c07b62e390564892a9168f60e33f406
|
845851a954b4319b9b87def154eb709e970232d1
|
refs/heads/master
| 2016-09-09T22:12:25.684991
| 2012-04-21T17:09:44
| 2012-04-21T17:09:44
| 1,230,822
| 4
| 1
| null | 2013-02-19T14:55:19
| 2011-01-07T18:17:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,643
|
py
|
"""
Comms Agent
MESSAGES PROCESSED:
- "__tick__"
- "query"
MESSAGES EMITTED:
- "packet"
@date: 2011-01-07
@author: jldupont
"""
_MDNS_ADDR = '224.0.0.251'
_MDNS_PORT = 5353
_MAX_MSG_ABSOLUTE = 8972
_SELECT_TIMEOUT=0.5
import socket
import select
from mdns_browser.system.base import AgentThreadedBase
class CommsAgent(AgentThreadedBase):
def __init__(self):
AgentThreadedBase.__init__(self)
self._failures=[]
## can't really fail here
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
##The following doesn't work on Linux
try: self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except: pass
except:
self._failures.append("Socket Options: REUSEADDR")
try:
self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
except:
self._failures.append("Socket Options: Multicast")
try:
self.group = ('', _MDNS_PORT)
self.socket.bind(self.group)
except:
# Some versions of linux raise an exception even though
# the SO_REUSE* options have been set, so ignore it
#
pass
self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton('0.0.0.0'))
self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
def h_query(self, protocol_msg):
try:
_bytes_sent = self.socket.sendto(protocol_msg, 0, (_MDNS_ADDR, _MDNS_PORT))
except:
# Ignore this, it may be a temporary loss of network connection
pass
def h___tick__(self, *_):
""" Might have to tweak receive interval...
"""
if len(self._failures) > 0:
self.log("c", "Network Socket Error: %s" % self._failures)
try:
rr, _wr, _er = select.select([self.socket,], [], [], _SELECT_TIMEOUT)
if rr:
try:
data, (addr, port) = self.socket.recvfrom(_MAX_MSG_ABSOLUTE)
self.pub("packet", data, addr, port)
except:
pass
        except Exception as e:
            self.pub("llog", "Receive Error: %s" % e)
_=CommsAgent()
_.start()
|
[
"github@jldupont.com"
] |
github@jldupont.com
|
214b48c799475713810c0520b6dd6b4ff5350818
|
02563d2825d1dbf82b3b7e7fff3265814cf02338
|
/api/complejo/serializers.py
|
a708d54051165af812b5e58a050abb14469d06bc
|
[] |
no_license
|
hatsem78/natagua
|
e9bc4d15f5385e48dc08487e118e25694c1c1159
|
150966143fdd56aba474ca01ebb43a21e75f71be
|
refs/heads/master
| 2023-03-04T14:38:42.933083
| 2021-05-19T13:30:50
| 2021-05-19T13:30:50
| 187,080,249
| 2
| 0
| null | 2022-12-09T02:47:35
| 2019-05-16T18:22:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.pagination import PageNumberPagination
from rest_framework import generics, serializers
from app_natagua.models import Complejo
class ComplejoPagSerializer(serializers.Serializer):
class Meta:
model = Complejo
        fields = ('id', 'nombre', 'direccion', 'telefono', 'descripcion')
id = serializers.IntegerField(read_only=True)
dni = serializers.IntegerField()
nombre = serializers.CharField(required=True, allow_blank=False, max_length=100)
direccion = serializers.CharField(max_length=200)
telefono = serializers.CharField(max_length=50, allow_blank=True)
    descripcion = serializers.CharField(max_length=500, allow_blank=True)
class ComplejoSerializer(serializers.Serializer):
class Meta:
model = Complejo
        fields = ('id', 'nombre', 'direccion', 'telefono', 'descripcion')
id = serializers.IntegerField(read_only=True)
nombre = serializers.CharField(required=True, allow_blank=False, max_length=100)
direccion = serializers.CharField(max_length=200, allow_blank=True, required=False)
telefono = serializers.CharField(max_length=50, allow_blank=True)
    descripcion = serializers.CharField(max_length=500, allow_blank=True, required=False)
def create(self, validated_data):
"""
Create and return a new `Complejo` instance, given the validated data.
"""
return Complejo.objects.create(**validated_data)
def update(self, instance, validated_data):
"""
Update and return an existing `Complejo` instance, given the validated data.
"""
instance.nombre = validated_data.get('nombre', instance.nombre)
instance.direccion = validated_data.get('direccion', instance.direccion)
instance.telefono = validated_data.get('telefono', instance.telefono)
instance.descripcion = validated_data.get('descripcion', instance.descripcion)
instance.save()
return instance
|
[
"ohatsembiller@kiusys.com"
] |
ohatsembiller@kiusys.com
|
277cd7ab0521984adf29cef8e2141744d1ebee3b
|
0e8518907f1eadfc2725c36038108daec17744e0
|
/lib/cogs/help.py
|
ac04132a38f24f9e7c4b8005a98c14208fc10754
|
[] |
no_license
|
NamitS27/NZEC-Bot
|
6188c0f5dd3319b1ce66cf9263c85ac96ec02674
|
11768f70b29fe02199bd2586954c91be828ed565
|
refs/heads/master
| 2023-02-22T14:08:33.831297
| 2021-01-20T03:13:56
| 2021-01-20T03:13:56
| 324,370,393
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,796
|
py
|
from typing import Optional
from discord import Embed
from discord.utils import get
from discord.ext.menus import MenuPages, ListPageSource
from discord.ext.commands import Cog
from discord.ext.commands import command
def syntax(command):
cmd_and_aliases = "|".join([str(command), *command.aliases])
params = []
for key, value in command.params.items():
if key not in ("self", "ctx"):
params.append(f"[{key}]" if "NoneType" in str(value) else f"<{key}>")
params = " ".join(params)
return f"```{cmd_and_aliases} {params}```"
class HelpMenu(ListPageSource):
def __init__(self, ctx, data):
self.ctx = ctx
super().__init__(data, per_page=3)
async def write_page(self, menu, fields=[]):
offset = (menu.current_page*self.per_page) + 1
len_data = len(self.entries)
embed = Embed(title="Help",
description="Welcome to the NZEC help dialog! Command Prefix = '~'",
colour=self.ctx.author.colour)
#embed.set_thumbnail(url=self.ctx.guild.me.avatar_url)
embed.set_footer(text=f"{offset:,} - {min(len_data, offset+self.per_page-1):,} of {len_data:,} commands.")
for name, value in fields:
embed.add_field(name=name, value=value, inline=False)
return embed
async def format_page(self, menu, entries):
fields = []
for entry in entries:
if entry.name=="mashup":
fields.append((entry.brief or "No description", f"```\n{entry.usage}\n```"))
else:
fields.append((entry.brief or "No description", syntax(entry)))
return await self.write_page(menu, fields)
class Help(Cog):
def __init__(self, bot):
self.bot = bot
self.bot.remove_command("help")
async def cmd_help(self, ctx, command):
embed = Embed(title=f"Help with `{command}`",
description=syntax(command),
colour=ctx.author.colour)
embed.add_field(name="Command description", value=command.help)
await ctx.send(embed=embed)
@command(name="help",brief="Help command for respective commands")
async def show_help(self, ctx, cmd: Optional[str]):
"""
Welcome to the help command. Seek the required help.
Commands are to be executed using the prefix '~'. For eg. `~help` or `~plotr tourist`.
Get the help of the respective commands by adding an argument of the command name along with the help to get more details about how to execute the command.
"""
if cmd is None:
menu = MenuPages(source=HelpMenu(ctx, list(self.bot.commands)),
delete_message_after=True,
timeout=180.0)
await menu.start(ctx)
else:
command = get(self.bot.commands, name=cmd)
if command:
await self.cmd_help(ctx, command)
else:
await ctx.send("That command does not exist.")
@Cog.listener()
async def on_ready(self):
if not self.bot.ready:
self.bot.cogs_ready.ready_up("help")
def setup(bot):
bot.add_cog(Help(bot))
|
[
"namit.s@ahduni.edu.in"
] |
namit.s@ahduni.edu.in
|
3a44b375f311fef2f46d4257c911f1685ceea2b7
|
bc6492a9a30ac7228caad91643d58653b49ab9e3
|
/sympy/physics/quantum/density.py
|
46520bc81aa4e43887fdfe199fe4ad6471d4caa2
|
[] |
no_license
|
cosmosZhou/sagemath
|
2c54ea04868882340c7ef981b7f499fb205095c9
|
0608b946174e86182c6d35d126cd89d819d1d0b8
|
refs/heads/master
| 2023-01-06T07:31:37.546716
| 2020-11-12T06:39:22
| 2020-11-12T06:39:22
| 311,177,322
| 1
| 0
| null | 2020-11-12T06:09:11
| 2020-11-08T23:42:40
|
Python
|
UTF-8
|
Python
| false
| false
| 10,163
|
py
|
from __future__ import print_function, division
from itertools import product
from sympy import Tuple, Add, Mul, Matrix, log, expand, Rational
from sympy.core.trace import Tr
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.operator import HermitianOperator
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.matrixutils import numpy_ndarray, scipy_sparse_matrix, to_numpy
from sympy.physics.quantum.tensorproduct import TensorProduct, tensor_product_simp
class Density(HermitianOperator):
"""Density operator for representing mixed states.
TODO: Density operator support for Qubits
Parameters
==========
values : tuples/lists
Each tuple/list should be of form (state, prob) or [state,prob]
Examples
========
Create a density operator with 2 states represented by Kets.
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d
'Density'((|0>, 0.5),(|1>, 0.5))
"""
@classmethod
def _eval_args(cls, args):
# call this to qsympify the args
args = super(Density, cls)._eval_args(args)
for arg in args:
# Check if arg is a tuple
if not (isinstance(arg, Tuple) and
len(arg) == 2):
raise ValueError("Each argument should be of form [state,prob]"
" or ( state, prob )")
return args
def states(self):
"""Return list of all states.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.states()
(|0>, |1>)
"""
return Tuple(*[arg[0] for arg in self.args])
def probs(self):
"""Return list of all probabilities.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.probs()
(0.5, 0.5)
"""
return Tuple(*[arg[1] for arg in self.args])
def get_state(self, index):
"""Return specific state by index.
Parameters
==========
index : index of state to be returned
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.states()[1]
|1>
"""
state = self.args[index][0]
return state
def get_prob(self, index):
"""Return probability of specific state by index.
Parameters
===========
index : index of states whose probability is returned.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.probs()[1]
0.500000000000000
"""
prob = self.args[index][1]
return prob
def apply_op(self, op):
"""op will operate on each individual state.
Parameters
==========
op : Operator
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> from sympy.physics.quantum.operator import Operator
>>> A = Operator('A')
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.apply_op(A)
'Density'((A*|0>, 0.5),(A*|1>, 0.5))
"""
new_args = [(op*state, prob) for (state, prob) in self.args]
return Density(*new_args)
def doit(self, **hints):
"""Expand the density operator into an outer product format.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> from sympy.physics.quantum.operator import Operator
>>> A = Operator('A')
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.doit()
0.5*|0><0| + 0.5*|1><1|
"""
terms = []
for (state, prob) in self.args:
state = state.expand() # needed to break up (a+b)*c
if (isinstance(state, Add)):
for arg in product(state.args, repeat=2):
terms.append(prob *
self._generate_outer_prod(arg[0], arg[1]))
else:
terms.append(prob *
self._generate_outer_prod(state, state))
return Add(*terms)
def _generate_outer_prod(self, arg1, arg2):
c_part1, nc_part1 = arg1.args_cnc()
c_part2, nc_part2 = arg2.args_cnc()
if ( len(nc_part1) == 0 or
len(nc_part2) == 0 ):
            raise ValueError('At least one pair of'
                             ' non-commutative instances is required'
                             ' for the outer product.')
# Muls of Tensor Products should be expanded
# before this function is called
if (isinstance(nc_part1[0], TensorProduct) and
len(nc_part1) == 1 and len(nc_part2) == 1):
op = tensor_product_simp(nc_part1[0] * Dagger(nc_part2[0]))
else:
op = Mul(*nc_part1) * Dagger(Mul(*nc_part2))
return Mul(*c_part1)*Mul(*c_part2)*op
def _represent(self, **options):
return represent(self.doit(), **options)
def _print_operator_name_latex(self, printer, *args):
return printer._print(r'\rho', *args)
def _print_operator_name_pretty(self, printer, *args):
        # unichr() does not exist on Python 3; prettyForm accepts the string directly.
        return prettyForm('\N{GREEK SMALL LETTER RHO}')
def _eval_trace(self, **kwargs):
indices = kwargs.get('indices', [])
return Tr(self.doit(), indices).doit()
def entropy(self):
""" Compute the entropy of a density matrix.
Refer to density.entropy() method for examples.
"""
return entropy(self)
def entropy(density):
"""Compute the entropy of a matrix/density object.
This computes -Tr(density*ln(density)) using the eigenvalue decomposition
of density, which is given as either a Density instance or a matrix
(numpy.ndarray, sympy.Matrix or scipy.sparse).
Parameters
==========
density : density matrix of type Density, sympy matrix,
scipy.sparse or numpy.ndarray
Examples
========
>>> from sympy.physics.quantum.density import Density, entropy
>>> from sympy.physics.quantum.represent import represent
>>> from sympy.physics.quantum.matrixutils import scipy_sparse_matrix
>>> from sympy.physics.quantum.spin import JzKet, Jz
>>> from sympy import S, log
>>> up = JzKet(S(1)/2,S(1)/2)
>>> down = JzKet(S(1)/2,-S(1)/2)
>>> d = Density((up,S(1)/2),(down,S(1)/2))
>>> entropy(d)
log(2)/2
"""
if isinstance(density, Density):
density = represent(density) # represent in Matrix
if isinstance(density, scipy_sparse_matrix):
density = to_numpy(density)
if isinstance(density, Matrix):
eigvals = density.eigenvals().keys()
return expand(-sum(e*log(e) for e in eigvals))
elif isinstance(density, numpy_ndarray):
import numpy as np
eigvals = np.linalg.eigvals(density)
return -np.sum(eigvals*np.log(eigvals))
else:
raise ValueError(
"numpy.ndarray, scipy.sparse or sympy matrix expected")
def fidelity(state1, state2):
""" Computes the fidelity [1]_ between two quantum states
The arguments provided to this function should be a square matrix or a
Density object. If it is a square matrix, it is assumed to be diagonalizable.
Parameters
==========
state1, state2 : a density matrix or Matrix
Examples
========
>>> from sympy import S, sqrt
>>> from sympy.physics.quantum.dagger import Dagger
>>> from sympy.physics.quantum.spin import JzKet
>>> from sympy.physics.quantum.density import Density, fidelity
>>> from sympy.physics.quantum.represent import represent
>>>
>>> up = JzKet(S(1)/2,S(1)/2)
>>> down = JzKet(S(1)/2,-S(1)/2)
>>> amp = 1/sqrt(2)
>>> updown = (amp * up) + (amp * down)
>>>
>>> # represent turns Kets into matrices
>>> up_dm = represent(up * Dagger(up))
>>> down_dm = represent(down * Dagger(down))
>>> updown_dm = represent(updown * Dagger(updown))
>>>
>>> fidelity(up_dm, up_dm)
1
>>> fidelity(up_dm, down_dm) #orthogonal states
0
>>> fidelity(up_dm, updown_dm).evalf().round(3)
0.707
References
==========
.. [1] https://en.wikipedia.org/wiki/Fidelity_of_quantum_states
"""
state1 = represent(state1) if isinstance(state1, Density) else state1
state2 = represent(state2) if isinstance(state2, Density) else state2
if (not isinstance(state1, Matrix) or
not isinstance(state2, Matrix)):
raise ValueError("state1 and state2 must be of type Density or Matrix "
"received type=%s for state1 and type=%s for state2" %
(type(state1), type(state2)))
    if state1.shape != state2.shape or not state1.is_square:
raise ValueError("The dimensions of both args should be equal and the "
"matrix obtained should be a square matrix")
sqrt_state1 = state1**Rational(1, 2)
return Tr((sqrt_state1 * state2 * sqrt_state1)**Rational(1, 2)).doit()
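# A minimal self-check sketch (assumption: the spin module's JzKet is available;
# values mirror the doctests above). Guarded so importing the module is unchanged.
if __name__ == "__main__":
    from sympy import S
    from sympy.physics.quantum.spin import JzKet
    up = JzKet(S(1)/2, S(1)/2)
    down = JzKet(S(1)/2, -S(1)/2)
    d = Density((up, S(1)/2), (down, S(1)/2))
    print(entropy(d))                            # log(2)/2, as in the doctest
    print(fidelity(represent(d), represent(d)))  # fidelity of a state with itself: 1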
|
[
"74498494@qq.com"
] |
74498494@qq.com
|
95169e3bc64ea480bf42ee41a866d3671ac91f52
|
20eff49a6c45b7c2877df0f530133b512a5d55e9
|
/18429 근손실 실버3.py
|
426f6a2473839db83bc7988c4c09820d78c34333
|
[] |
no_license
|
mintai09/BAEKJOON
|
b4fb0ec7fa5964c2a965c3da1cc86ef5383db6ce
|
ba6a738de1956fd7fb790e4203eef1c19aac684c
|
refs/heads/master
| 2023-02-19T16:07:02.268823
| 2021-01-19T13:07:25
| 2021-01-19T13:07:25
| 330,980,300
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
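# BOJ 18429 ("muscle loss", Silver 3): starting at weight 500, the weight drops
# by K each day and the chosen (unused) kit i restores A[i]; count the kit
# orderings that never let the weight fall below 500 (DFS over permutations).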
N,K = map(int,input().split())
A = list(map(int,input().split()))
result = 0
def dfs(cnt,v,e):
global result
    if cnt == N:
        result += 1
        return
for i in range(N):
if i not in v and e - K + A[i] >= 500:
temp = v + [i]
dfs(cnt+1,temp,e - K + A[i])
dfs(0,[],500)
print(result)
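# Hypothetical sample run: with N=2, K=4 and A=[4, 4] the weight stays at 500
# under either ordering, so the printed result is 2.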
|
[
"mintai09@gmail.com"
] |
mintai09@gmail.com
|
cd8c9e0f263a8f21d2565174911ab21daa613636
|
11b15acefe68d70b2f4c1b8468804352e87ddf4a
|
/test/test_segment_tree.py
|
aa6655054536d5edc8cabf09211e6ab3d0cbde94
|
[] |
no_license
|
chenhaocmk/data_structure
|
2f9b772a6224e1b3063ba0096b5ceb948c45e5f7
|
29cb46fa347452e0d4a7d44bade36f0711e1429b
|
refs/heads/master
| 2020-03-16T05:47:49.917465
| 2018-05-16T16:07:36
| 2018-05-16T16:07:36
| 132,540,690
| 0
| 0
| null | 2018-05-16T16:07:37
| 2018-05-08T02:10:41
|
Python
|
UTF-8
|
Python
| false
| false
| 411
|
py
|
from src.segment_tree import SegmentTree
from util.visualizer import print_tree
tree = SegmentTree(list(range(21)))
print_tree(tree.root, lambda x: [y for y in (x.left, x.right)] if x else [])
print(tree.get_sum(0, 0))
print(tree.get_sum(0, 1))
print(tree.get_sum(5, 20))
print(tree.get_sum(1, 20))
print('\n')
tree.update(5, 10)
print_tree(tree.root, lambda x: [y for y in (x.left, x.right)] if x else [])
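# Assuming inclusive bounds over list(range(21)), the expected sums are
# 0, 1, sum(range(5, 21)) == 200 and sum(range(1, 21)) == 210; update(5, 10)
# then modifies index 5 (replace vs. add depends on SegmentTree's contract).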
|
[
"chenhao_ch@outlook.com"
] |
chenhao_ch@outlook.com
|
2416f40e5653e4adfdb5d75d394d1fbc5238f04e
|
e8de20423ed1c4d057885719ff90797a6081e25b
|
/rentals/management/commands/calculate.py
|
f16d6a387863d0674c5d24de542d0b1ff4f99d09
|
[] |
no_license
|
po5i/test__stackbuilders
|
8d88c251a5f0190d79739c219c900531959caf43
|
b97bdd698a382421ae1fc083d17d45dd3865c9be
|
refs/heads/master
| 2021-09-01T21:56:58.100891
| 2017-12-28T20:57:52
| 2017-12-28T20:57:52
| 114,693,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
from django.core.management.base import BaseCommand, CommandError
from rentals.models import *
import json
import datetime
class Command(BaseCommand):
help = 'Usage example: python manage.py calculate \'{"rentDates":["2017-11-19T05:00:00.000Z","2017-11-20T05:00:00.000Z","2017-11-21T05:00:00.000Z"],"car":{"model":"Cherato","type":"sport"},"membership":false,"age":24}\''
def add_arguments(self, parser):
parser.add_argument('json', type=str)
def handle(self, *args, **options):
"""
Input a json, and get a json
"""
parsed_data = json.loads(options["json"])
car = Car.objects.get(model=parsed_data["car"]["model"])
rental = Rental.objects.create(car=car, membership=parsed_data["membership"], age=parsed_data["age"])
for date_str in parsed_data["rentDates"]:
date = datetime.datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S.%fZ").date()
RentalDates.objects.create(rental=rental, date=date)
output = json.dumps(rental.generate_output(), ensure_ascii=False)
self.stdout.write('===================================================')
self.stdout.write(output)
self.stdout.write('===================================================')
self.stdout.write('Completed!')
rental.delete()
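        # The Rental exists only long enough to run the calculation; deleting
        # it keeps this management command free of database side effects.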
|
[
"carlos.po5i@gmail.com"
] |
carlos.po5i@gmail.com
|
6114aa1f623aa605138002084a9deeccae1e460e
|
b13353af6fa84b560d0844a0b1e4a2a86f9103e9
|
/assign3/KNN_Classifier.py
|
67038bfd1899e4253c04250035782ff04656dcf4
|
[] |
no_license
|
AadityaDeshpande/LP3
|
71866f1b766d8e5727423f45d98fc5412d22910f
|
741dde3b267d748703dc0e7bfe37f1a6d11f7e1b
|
refs/heads/master
| 2020-11-26T04:48:59.496838
| 2020-04-03T16:40:42
| 2020-04-03T16:40:42
| 228,967,812
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
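# Toy 3-nearest-neighbour demo: six labelled 2-D points (class 0 = orange
# squares, class 1 = blue dots); the query point (6, 6) is classified by
# majority vote of its 3 nearest neighbours under the Euclidean metric
# (Minkowski with p=2).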
p1=[2,4]
p2=[4,2]
p3=[4,4]
p4=[4,6]
p5=[6,2]
p6=[6,4]
x=[p1,p2,p3,p4,p5,p6]
y=[0,0,1,0,1,0] # 0 for orange and 1 for blue
classifier=KNeighborsClassifier(n_neighbors=3,p=2, metric='minkowski')
classifier.fit(x,y)
x_pred=np.array([6,6])
y_pred=classifier.predict(x_pred.reshape(1,-1))
plt.scatter(p1[0],p1[1],c='orange',marker='s')
plt.scatter(p2[0],p2[1],c='orange',marker='s')
plt.scatter(p3[0],p3[1],c='blue',)
plt.scatter(p4[0],p4[1],c='orange',marker='s')
plt.scatter(p5[0],p5[1],c='blue')
plt.scatter(p6[0],p6[1],c='orange',marker='s')
if(y_pred==0):
color='orange'
marker='s'
else:
color='blue'
marker='.'
plt.scatter(x_pred[0],x_pred[1],c=color,marker=marker,s=400)
print("Point (6,6) gets classified as ",color)
plt.show()
|
[
"noreply@github.com"
] |
AadityaDeshpande.noreply@github.com
|
628717c20363b27b345cd44c1ecd110d42795a72
|
60d783b91fd2926ac1d9c231669f1fdbb8fdf192
|
/backend/post/admin.py
|
1242b8c0ebb64d35b842402b79126229b327d52a
|
[] |
no_license
|
tanmaypardeshi/Ocean
|
e9fc9a68f326755b349d92d3ce21056af1a6eae5
|
ce4b88e4476a0e00e3a75c309bf1a5da23a42f02
|
refs/heads/master
| 2023-04-15T18:23:15.321061
| 2021-04-27T06:29:28
| 2021-04-27T06:29:28
| 293,098,502
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
from django.contrib import admin
from .models import Post, Tag, Like, Comment, Delete
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ['author', 'title']
ordering = ['published_at']
admin.site.register(Tag)
admin.site.register(Like)
admin.site.register(Comment)
admin.site.register(Delete)
|
[
"tanmaypardeshi@gmail.com"
] |
tanmaypardeshi@gmail.com
|
fa8d990ae169ae8c132890e137da7d2498437a79
|
f870df1a117575bc0aef6e9796c912531347061c
|
/src/restaurants/migrations/0008_remove_restaurantlocation_my_date_field.py
|
8d22f838faca496c2910740ef7bf7b0841d08197
|
[] |
no_license
|
PatrykJanMatlak/django-test
|
c39af7795abfc064264cf1b9742ee84a097917e6
|
ab434c39d950114feee91e39e798db40fa5cdb0e
|
refs/heads/master
| 2020-03-21T01:13:14.005746
| 2018-07-09T14:00:41
| 2018-07-09T14:00:41
| 137,930,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-06-30 10:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('restaurants', '0007_restaurantlocation_slug'),
]
operations = [
migrations.RemoveField(
model_name='restaurantlocation',
name='my_date_field',
),
]
|
[
"patrykjan.matlak@gmail.com"
] |
patrykjan.matlak@gmail.com
|
a1ddfdacb1ec66499586b96730492508a864f4ae
|
4c3ce2d2c1bbf0f054fba369b1dc5c51f8ff6663
|
/portfolio-1/personal_portfolio/hello_world/views.py
|
cf57db26d237ebf4a5b6ae72bcefa9247e678ada
|
[] |
no_license
|
pulkitkinra01/DjangoPracticeCodes
|
39061adc25e7dd90e986e885fa145fda0d54d7af
|
2677659a3c53204eef45f05a39848dbb1ca26935
|
refs/heads/master
| 2023-02-13T04:13:15.512398
| 2020-12-29T22:56:22
| 2020-12-29T22:56:22
| 325,404,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
from django.shortcuts import render
# Create your views here.
def hello_world(request):
return render(request, 'hello_world.html', {})
|
[
""
] | |
84cd9e7aa90a30f967d5a07b5421710e52d454f7
|
27cb9cc771ffa02c4f7e12dcd4688e311c63aace
|
/fairseq/data/truncate_dataset.py
|
32e47ee91bd10a54ca27eb08d46fb0f5e731e0c3
|
[
"MIT"
] |
permissive
|
periclesmiranda/TSPNet
|
78aee61a4e4497ae82b1bb6731a6edd6230720cd
|
8f71315486c78b540382ef6420eab5441333bcda
|
refs/heads/main
| 2023-07-19T16:06:48.169045
| 2021-09-10T15:08:36
| 2021-09-10T15:08:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from . import BaseWrapperDataset
class TruncateDataset(BaseWrapperDataset):
def __init__(self, dataset, truncation_length):
super().__init__(dataset)
assert truncation_length is not None
self.truncation_length = truncation_length
self.dataset = dataset
def __getitem__(self, index):
item = self.dataset[index]
item_len = item.size(0)
if item_len > self.truncation_length:
item = item[:self.truncation_length]
return item
@property
def sizes(self):
return np.minimum(self.dataset.sizes, self.truncation_length)
def __len__(self):
return len(self.dataset)
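# Minimal usage sketch (names are illustrative, not from fairseq):
#
#   truncated = TruncateDataset(token_dataset, truncation_length=512)
#   item = truncated[0]          # at most the first 512 elements of item 0
#   lengths = truncated.sizes    # element-wise min(original sizes, 512)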
|
[
"chenchen.xu@anu.edu.au"
] |
chenchen.xu@anu.edu.au
|
7a591f0bde3ee8e3a8d458e8e79f981b9b842aa8
|
7c98e8d730d6741b987adb7f0f950fa3f22ce8e7
|
/dm_tools/linked_lists.py
|
56d79adf0bbdea947b82a2c09bb9c24a0e0ad813
|
[
"MIT"
] |
permissive
|
sjsafranek/dm_tools
|
e68d85bd30e4b14b64a292d2802ca2751d24859d
|
10a48d5c6b50428221a445d4f2eeeb060898dcfe
|
refs/heads/master
| 2020-03-20T15:20:34.622179
| 2018-11-23T05:11:11
| 2018-11-23T05:11:11
| 137,510,621
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,578
|
py
|
# https://stonesoupprogramming.com/2017/05/21/circular-linked-list-python/
from enum import Enum
class NodeConstants(Enum):
FRONT_NODE = 1
class Node:
def __init__(self, element=None, next_node=None):
self.element = element
self.next_node = next_node
def __str__(self):
if self.element:
return self.element.__str__()
else:
return 'Empty Node'
def __repr__(self):
return self.__str__()
class CircularLinkedList:
def __init__(self):
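        # Sentinel "front" node: an empty list is the sentinel pointing at
        # itself, so traversals terminate when they reach self.head again.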
self.head = Node(element=NodeConstants.FRONT_NODE)
self.head.next_node = self.head
def size(self):
count = 0
current = self.head.next_node
while current != self.head:
count += 1
current = current.next_node
return count
def insert_front(self, data):
node = Node(element=data, next_node=self.head.next_node)
self.head.next_node = node
def insert_last(self, data):
current_node = self.head.next_node
while current_node.next_node != self.head:
current_node = current_node.next_node
node = Node(element=data, next_node=current_node.next_node)
current_node.next_node = node
def insert(self, data, position):
if position == 0:
self.insert_front(data)
elif position == self.size():
self.insert_last(data)
else:
if 0 < position < self.size():
current_node = self.head.next_node
current_pos = 0
while current_pos < position - 1:
current_pos += 1
current_node = current_node.next_node
node = Node(data, current_node.next_node)
current_node.next_node = node
else:
raise IndexError
def remove_first(self):
self.head.next_node = self.head.next_node.next_node
def remove_last(self):
current_node = self.head.next_node
while current_node.next_node.next_node != self.head:
current_node = current_node.next_node
current_node.next_node = self.head
def remove(self, position):
if position == 0:
self.remove_first()
        elif position == self.size() - 1:
self.remove_last()
else:
if 0 < position < self.size():
current_node = self.head.next_node
current_pos = 0
while current_pos < position - 1:
current_node = current_node.next_node
current_pos += 1
current_node.next_node = current_node.next_node.next_node
else:
raise IndexError
def fetch(self, position):
if 0 <= position < self.size():
current_node = self.head.next_node
current_pos = 0
while current_pos < position:
current_node = current_node.next_node
current_pos += 1
return current_node.element
else:
raise IndexError
import unittest
from random import randint
class TestCircularLinkedList(unittest.TestCase):
names = ['Bob Belcher',
'Linda Belcher',
'Tina Belcher',
'Gene Belcher',
'Louise Belcher']
def test_init(self):
dll = CircularLinkedList()
self.assertIsNotNone(dll.head)
self.assertEqual(dll.size(), 0)
def test_insert_front(self):
dll = CircularLinkedList()
for name in TestCircularLinkedList.names:
dll.insert_front(name)
self.assertEqual(dll.fetch(0), TestCircularLinkedList.names[4])
self.assertEqual(dll.fetch(1), TestCircularLinkedList.names[3])
self.assertEqual(dll.fetch(2), TestCircularLinkedList.names[2])
self.assertEqual(dll.fetch(3), TestCircularLinkedList.names[1])
self.assertEqual(dll.fetch(4), TestCircularLinkedList.names[0])
def test_insert_last(self):
dll = CircularLinkedList()
for name in TestCircularLinkedList.names:
dll.insert_last(name)
        for i in range(len(TestCircularLinkedList.names)):
self.assertEqual(dll.fetch(i), TestCircularLinkedList.names[i])
def test_insert(self):
dll = CircularLinkedList()
for name in TestCircularLinkedList.names:
dll.insert_last(name)
pos = randint(0, len(TestCircularLinkedList.names) - 1)
dll.insert('Teddy', pos)
self.assertEqual(dll.fetch(pos), 'Teddy')
def test_remove_first(self):
dll = CircularLinkedList()
for name in TestCircularLinkedList.names:
dll.insert_last(name)
for i in range(dll.size(), 0, -1):
self.assertEqual(dll.size(), i)
dll.remove_first()
def test_remove_last(self):
dll = CircularLinkedList()
for name in TestCircularLinkedList.names:
dll.insert_last(name)
for i in range(dll.size(), 0, -1):
self.assertEqual(dll.size(), i)
dll.remove_last()
def test_remove(self):
dll = CircularLinkedList()
for name in TestCircularLinkedList.names:
dll.insert_last(name)
dll.remove(1)
self.assertEqual(dll.fetch(0), 'Bob Belcher')
self.assertEqual(dll.fetch(1), 'Tina Belcher')
self.assertEqual(dll.fetch(2), 'Gene Belcher')
self.assertEqual(dll.fetch(3), 'Louise Belcher')
if __name__ == '__main__':
unittest.main()
|
[
"stefan@stefan.stefan"
] |
stefan@stefan.stefan
|
cd252c1cfc259525e8424c5377f0780794ff1749
|
7c68d5b5d143354bf16cd545c32c6ed6ab7617f1
|
/squad_spacy/bert_ner.py
|
59a0a97fa511f5fec6714fd2f3ad905b54ff0006
|
[] |
no_license
|
Minniemu/keyword_squad_spacy
|
fbf2fa07abf370f7286a77b9a4a5ac96e411a872
|
eea41a6dff4ac7cb2833a96e8adca6b24efed897
|
refs/heads/main
| 2023-05-01T09:35:02.063194
| 2021-04-11T13:55:59
| 2021-04-11T13:55:59
| 350,987,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,538
|
py
|
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
data = pd.read_csv("result2.csv", encoding="latin1").fillna(method="ffill")
print(data.tail(10))
class SentenceGetter(object):
def __init__(self, data):
self.n_sent = 1
self.data = data
self.empty = False
agg_func = lambda s: [(w, p, t) for w, p, t in zip(s["Word"].values.tolist(),
s["POS"].values.tolist(),
s["Tag"].values.tolist())]
self.grouped = self.data.groupby("Sentence #").apply(agg_func)
self.sentences = [s for s in self.grouped]
def get_next(self):
try:
s = self.grouped["Sentence: {}".format(self.n_sent)]
self.n_sent += 1
return s
        except KeyError:
return None
getter = SentenceGetter(data)
sentences = [[word[0] for word in sentence] for sentence in getter.sentences]
print(sentences[0])
labels = [[s[2] for s in sentence] for sentence in getter.sentences]
print(labels[0])
tag_values = list(set(data["Tag"].values))
tag_values.append("PAD")
tag_values.sort()
tag2idx = {t: i for i, t in enumerate(tag_values)}
print(tag_values)
print(tag2idx)
#Apply Bert
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizer, BertConfig, BertModel
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
print(torch.__version__)
MAX_LEN = 75
bs = 32
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
if n_gpu:
    print(torch.cuda.get_device_name(0))
tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
def tokenize_and_preserve_labels(sentence, text_labels):
tokenized_sentence = []
labels = []
for word, label in zip(sentence, text_labels):
# Tokenize the word and count # of subwords the word is broken into
tokenized_word = tokenizer.tokenize(word)
n_subwords = len(tokenized_word)
# Add the tokenized word to the final tokenized word list
tokenized_sentence.extend(tokenized_word)
# Add the same label to the new list of labels `n_subwords` times
labels.extend([label] * n_subwords)
return tokenized_sentence, labels
tokenized_texts_and_labels = [
tokenize_and_preserve_labels(sent, labs)
for sent, labs in zip(sentences, labels)
]
tokenized_texts = [token_label_pair[0] for token_label_pair in tokenized_texts_and_labels]
labels = [token_label_pair[1] for token_label_pair in tokenized_texts_and_labels]
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_texts],
maxlen=MAX_LEN, dtype="long", value=0.0,
truncating="post", padding="post")
tags = pad_sequences([[tag2idx.get(l) for l in lab] for lab in labels],
maxlen=MAX_LEN, value=tag2idx["PAD"], padding="post",
dtype="long", truncating="post")
attention_masks = [[float(i != 0.0) for i in ii] for ii in input_ids]
tr_inputs, val_inputs, tr_tags, val_tags = train_test_split(input_ids, tags,
random_state=2018, test_size=0.1)
tr_masks, val_masks, _, _ = train_test_split(attention_masks, input_ids,
random_state=2018, test_size=0.1)
tr_inputs = torch.tensor(tr_inputs)
val_inputs = torch.tensor(val_inputs)
tr_tags = torch.tensor(tr_tags)
val_tags = torch.tensor(val_tags)
tr_masks = torch.tensor(tr_masks)
val_masks = torch.tensor(val_masks)
train_data = TensorDataset(tr_inputs, tr_masks, tr_tags)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=bs)
valid_data = TensorDataset(val_inputs, val_masks, val_tags)
valid_sampler = SequentialSampler(valid_data)
valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=bs)
import transformers
from transformers import BertForTokenClassification, AdamW
print(transformers.__version__)
model = BertForTokenClassification.from_pretrained(
"bert-base-cased",
num_labels=len(tag2idx),
output_attentions = False,
output_hidden_states = False
)
model.to(device)
FULL_FINETUNING = True
if FULL_FINETUNING:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
else:
param_optimizer = list(model.classifier.named_parameters())
optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=3e-5,
eps=1e-8
)
from transformers import get_linear_schedule_with_warmup
epochs = 3
max_grad_norm = 1.0
# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
#Fit BERT for named entity recognition
from seqeval.metrics import f1_score, accuracy_score
## Store the average loss after each epoch so we can plot them.
loss_values, validation_loss_values = [], []
for _ in trange(epochs, desc="Epoch"):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
# Put the model into training mode.
model.train()
# Reset the total loss for this epoch.
total_loss = 0
# Training loop
for step, batch in enumerate(train_dataloader):
# add batch to gpu
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
# Always clear any previously calculated gradients before performing a backward pass.
model.zero_grad()
# forward pass
# This will return the loss (rather than the model output)
# because we have provided the `labels`.
outputs = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# get the loss
loss = outputs[0]
# Perform a backward pass to calculate the gradients.
loss.backward()
# track train loss
total_loss += loss.item()
# Clip the norm of the gradient
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=max_grad_norm)
# update parameters
optimizer.step()
# Update the learning rate.
scheduler.step()
# Calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_dataloader)
print("Average train loss: {}".format(avg_train_loss))
# Store the loss value for plotting the learning curve.
loss_values.append(avg_train_loss)
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
# Put the model into evaluation mode
model.eval()
# Reset the validation loss for this epoch.
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
predictions , true_labels = [], []
for batch in valid_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients,
# saving memory and speeding up validation
with torch.no_grad():
# Forward pass, calculate logit predictions.
# This will return the logits rather than the loss because we have not provided labels.
outputs = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# Move logits and labels to CPU
logits = outputs[1].detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences.
eval_loss += outputs[0].mean().item()
predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
true_labels.extend(label_ids)
eval_loss = eval_loss / len(valid_dataloader)
validation_loss_values.append(eval_loss)
print("Validation loss: {}".format(eval_loss))
pred_tags = [tag_values[p_i] for p, l in zip(predictions, true_labels)
for p_i, l_i in zip(p, l) if tag_values[l_i] != "PAD"]
valid_tags = [tag_values[l_i] for l in true_labels
for l_i in l if tag_values[l_i] != "PAD"]
print("Validation Accuracy: {}".format(accuracy_score(pred_tags, valid_tags)))
#print("Validation F1-Score: {}".format(f1_score(pred_tags, valid_tags)))
print()
# save the model to disk
import joblib
filename = 'finalized_model.sav'
joblib.dump(model, filename)
model = joblib.load(filename)
model.to(device)
test_sentence = """
Ousted WeWork founder Adam Neumann lists his Manhattan penthouse for $37.5 million.
"""
tokenized_sentence = tokenizer.encode(test_sentence)
input_ids = torch.tensor([tokenized_sentence]).to(device)
with torch.no_grad():
output = model(input_ids)
label_indices = np.argmax(output[0].to('cpu').numpy(), axis=2)
# join bpe split tokens
tokens = tokenizer.convert_ids_to_tokens(input_ids.to('cpu').numpy()[0])
new_tokens, new_labels = [], []
for token, label_idx in zip(tokens, label_indices[0]):
if token.startswith("##"):
new_tokens[-1] = new_tokens[-1] + token[2:]
else:
new_labels.append(tag_values[label_idx])
new_tokens.append(token)
for token, label in zip(new_tokens, new_labels):
print("{}\t{}".format(label, token))
|
[
"noreply@github.com"
] |
Minniemu.noreply@github.com
|
0aabe34c8e6843ec5789123f7adb7a8afdf574e2
|
1388bcd6de659ffefe97e7e6c2aee685b5e7c534
|
/stubs/stubs/MhkCosts.pyi
|
7f7163efa25350bd30c0c898018843d33808d350
|
[
"BSD-3-Clause"
] |
permissive
|
BRIK-Engenharia/pysam
|
a7b4b543131043510023a5c17b057ead0b39d440
|
2a4115f34419edf9776b0bbc7b3f453c958ce734
|
refs/heads/master
| 2022-12-06T05:15:35.364375
| 2020-09-03T22:59:17
| 2020-09-03T22:59:17
| 297,958,820
| 1
| 0
|
BSD-3-Clause
| 2020-09-23T12:13:32
| 2020-09-23T12:13:32
| null |
UTF-8
|
Python
| false
| false
| 2,719
|
pyi
|
from typing import Dict


class MHKCosts(object):
def assign(self):
pass
    def export(self) -> Dict[str, Dict]:
pass
def __init__(self, *args, **kwargs):
pass
array_cable_system_cost_input = float
array_cable_system_cost_method = float
assembly_and_install_cost_input = float
assembly_and_install_cost_method = float
development_cost_input = float
development_cost_method = float
device_rated_power = float
devices_per_row = float
eng_and_mgmt_cost_input = float
eng_and_mgmt_cost_method = float
export_cable_length = float
export_cable_system_cost_input = float
export_cable_system_cost_method = float
inter_array_cable_length = float
lib_wave_device = str
library_or_input_wec = float
marine_energy_tech = float
mooring_found_substruc_cost_input = float
mooring_found_substruc_cost_method = float
offshore_substation_cost_input = float
offshore_substation_cost_method = float
onshore_substation_cost_input = float
onshore_substation_cost_method = float
other_elec_infra_cost_input = float
other_elec_infra_cost_method = float
other_infrastructure_cost_input = float
other_infrastructure_cost_method = float
power_takeoff_system_cost_input = float
power_takeoff_system_cost_method = float
riser_cable_length = float
structural_assembly_cost_input = float
structural_assembly_cost_method = float
system_capacity = float
class Outputs(object):
def assign(self):
pass
    def export(self) -> Dict[str, Dict]:
pass
def __init__(self, *args, **kwargs):
pass
array_cable_system_cost_modeled = float
assembly_and_install_cost_modeled = float
development_cost_modeled = float
eng_and_mgmt_cost_modeled = float
export_cable_system_cost_modeled = float
insurance_during_construction = float
maintenance_cost = float
mooring_found_substruc_cost_modeled = float
offshore_substation_cost_modeled = float
onshore_substation_cost_modeled = float
operations_cost = float
other_elec_infra_cost_modeled = float
other_infrastructure_cost_modeled = float
plant_commissioning_cost_modeled = float
power_takeoff_system_cost_modeled = float
project_contingency = float
reserve_accounts = float
site_access_port_staging_cost_modeled = float
structural_assembly_cost_modeled = float
class MhkCosts(object):
def assign(self, dict):
pass
def value(self, name, value=None):
pass
def execute(self, int_verbosity):
pass
def export(self):
pass
def __getattribute__(self, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
MHKCosts = MHKCosts
Outputs = Outputs
def default(config) -> MhkCosts:
pass
def new() -> MhkCosts:
pass
def wrap(ssc_data_t) -> MhkCosts:
pass
def from_existing(model, config="") -> MhkCosts:
pass
__loader__ = None
__spec__ = None
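# Usage sketch based on the stubbed API above (the numeric value is
# illustrative only):
#
#   model = new()
#   model.MHKCosts.device_rated_power = 286.0
#   model.execute(0)
#   outputs = model.Outputs.export()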
|
[
"dguittet@nrel.gov"
] |
dguittet@nrel.gov
|
1f0c1850396ad5ec47107e564098ddbe2ef1e74d
|
e80fcfff6ce24716c4eb0588b8ab0df9aae25cdf
|
/Robots.py
|
6fb7aa799239f2773a7eb648b7ca4e378edb028c
|
[] |
no_license
|
sdl1/robots
|
da3ff62d540e91aedb9f31269284aeb6293194aa
|
ae9dd6ce9ab31b1981c8d4347dea6d1b16d89162
|
refs/heads/master
| 2021-01-19T10:58:16.049137
| 2017-02-16T21:33:59
| 2017-02-16T21:33:59
| 82,224,826
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,957
|
py
|
from Sprites import Sprite, Missile
from Vector import Vec2d
import Move
import math
class Teams:
BLUE = 0
RED = 1
GREEN = 2
ORANGE = 3
Name = [ "BLUE", "RED", "GREEN", "ORANGE" ]
RGB = [ [0,0,1], [1,0,0], [0,0.6,0], [1,0.5,0] ]
num = len(Name)
class RobotState:
def __init__(self, robot=None):
if(robot==None):
self.position = Vec2d(0,0)
self.direction = Vec2d(0,0)
return
self.UID = robot.UID
self.colour = robot.colour
self.position = robot.position.copy()
self.direction = robot.direction.copy()
self.speed = robot.speed
self.maxspeed = robot.maxspeed
self.hitpoints = robot.hitpoints
self.missilesLeft = robot.missilesLeft
self.signal = robot.signal
    def pack(self):
        # Both components of the 2-D position and direction are serialised,
        # so unpack() can reconstruct the full vectors.
        return [ self.UID, self.colour,
                 self.position.x, self.position.y,
                 self.direction.x, self.direction.y,
                 self.speed, self.maxspeed,
                 self.hitpoints, self.missilesLeft, self.signal ]
    @staticmethod
    def unpack(packed):
        ret = RobotState()
        ret.UID = packed[0]
        ret.colour = packed[1]
        ret.position = Vec2d(packed[2], packed[3])
        ret.direction = Vec2d(packed[4], packed[5])
        ret.speed = packed[6]
        ret.maxspeed = packed[7]
        ret.hitpoints = packed[8]
        ret.missilesLeft = packed[9]
        ret.signal = packed[10]
        return ret
class Robot(Sprite):
colour = 0
viewangle = 180
viewdistance = 900
staticMissile = Missile(Vec2d(0,0), 0, Vec2d(1,0))
def __init__(self, col, pos, maxspeed, dirn, hp, ai):
self.signal = Move.Signals.NONE
self.missilesLeft = 1
self.laser_cooldown = 0
self.laser_max_cooldown = 2
self.laser_overheated = False
rad = 8
self.ai = ai
Sprite.__init__(self, pos, maxspeed, dirn, hp, rad)
self.colour = col
def die(self):
self.ai.die()
def gameOver(self):
self.ai.gameOver()
def draw(self, cr, simple=False):
cr.set_line_width(4)
rgb = Teams.RGB[self.colour]
cr.set_source_rgb(rgb[0], rgb[1], rgb[2])
if simple:
r = cr.device_to_user_distance(0.3*self.boundingradius, 1.0)[0]
cr.arc(0, 0, r, 0, 2*math.pi)
cr.fill()
return
cr.move_to(0, 0)
cr.rel_line_to(20*self.direction[0], 20*self.direction[1])
cr.stroke()
cr.arc(0, 0, self.boundingradius, 0, 2 * math.pi)
cr.stroke_preserve()
health = self.hitpoints / float(100)
cr.set_source_rgb(health, health, health)
cr.fill()
cr.set_source_rgb(0, 0, 1)
theta = self.directionAngle()
cr.rotate(theta)
cr.scale(0.5, 0.5)
for i in range(0, self.missilesLeft):
#cr.arc(-2 + 6*i, self.boundingradius*2, 2, 0, 2 * math.pi)
#cr.fill()
cr.translate(0, self.boundingradius*4 + 15*i)
Robot.staticMissile.draw(cr)
cr.translate(0, -self.boundingradius*4 - 15*i)
if not self.signal==Move.Signals.NONE:
rgb = Move.Signals.RGB[self.signal]
cr.set_source_rgb(rgb[0], rgb[1], rgb[2])
cr.translate(0, -self.boundingradius*5)
cr.arc(0, 0, 11, 0, 2 * math.pi)
cr.fill()
cr.translate(0, self.boundingradius*5)
cr.scale(2.0, 2.0)
cr.rotate(-theta)
#Sprite.drawViewCone(self, cr)
self.ai.decorateSprite(cr)
def getMove(self, worldstate):
robotstate = RobotState(self)
move = self.ai.getMove(robotstate, worldstate)
if(move == Move.FIRE_MISSILE):
if(self.missilesLeft>0):
self.missilesLeft -= 1
return move
else:
move = Move.NONE
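        # Laser bookkeeping: each shot heats the laser one step; after
        # laser_max_cooldown consecutive shots it overheats, and further fire
        # attempts become NONE (cooling one step each) until it has cooled down.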
if(move == Move.FIRE_LASER):
if(self.laser_overheated):
self.laser_cooldown -= 1
if(self.laser_cooldown==0): self.laser_overheated = False
move = Move.NONE
else:
self.laser_cooldown += 1
if(self.laser_cooldown==self.laser_max_cooldown): self.laser_overheated = True
return move
return move
class RobotAI:
def __init__(self):
return
def getMove(self, robotstate, worldstate):
return Move.NONE
def die(self):
pass
def gameOver(self):
pass
def decorateSprite(self, cr):
pass
|
[
"s.lovett7@gmail.com"
] |
s.lovett7@gmail.com
|
4a88fb8002e8e6cd2089fc2ddb41e462177f2426
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/content/course_overviews/signals.py
|
336724002a3625c2068016e8daaa0ca879009d50
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252
| 2021-11-22T12:14:34
| 2021-11-22T12:14:34
| 163,850,454
| 3
| 1
|
MIT
| 2021-11-22T12:12:31
| 2019-01-02T14:21:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,198
|
py
|
"""
Signal handler for invalidating cached course overviews
"""
import logging
from django.dispatch import Signal
from django.dispatch.dispatcher import receiver
from openedx.core.djangoapps.signals.signals import COURSE_CERT_DATE_CHANGE
from xmodule.modulestore.django import SignalHandler
from .models import CourseOverview
LOG = logging.getLogger(__name__)
COURSE_START_DATE_CHANGED = Signal(providing_args=["updated_course_overview", "previous_start_date"])
COURSE_PACING_CHANGED = Signal(providing_args=["updated_course_overview", "previous_self_paced"])
@receiver(SignalHandler.course_published)
def _listen_for_course_publish(sender, course_key, **kwargs): # pylint: disable=unused-argument
"""
Catches the signal that a course has been published in Studio and
updates the corresponding CourseOverview cache entry.
"""
try:
previous_course_overview = CourseOverview.objects.get(id=course_key)
except CourseOverview.DoesNotExist:
previous_course_overview = None
updated_course_overview = CourseOverview.load_from_module_store(course_key)
_check_for_course_changes(previous_course_overview, updated_course_overview)
@receiver(SignalHandler.course_deleted)
def _listen_for_course_delete(sender, course_key, **kwargs): # pylint: disable=unused-argument
"""
Catches the signal that a course has been deleted from Studio and
invalidates the corresponding CourseOverview cache entry if one exists.
"""
CourseOverview.objects.filter(id=course_key).delete()
def _check_for_course_changes(previous_course_overview, updated_course_overview):
if previous_course_overview:
_check_for_course_date_changes(previous_course_overview, updated_course_overview)
_check_for_pacing_changes(previous_course_overview, updated_course_overview)
_check_for_cert_availability_date_changes(previous_course_overview, updated_course_overview)
def _check_for_course_date_changes(previous_course_overview, updated_course_overview):
if previous_course_overview.start != updated_course_overview.start:
_log_start_date_change(previous_course_overview, updated_course_overview)
COURSE_START_DATE_CHANGED.send(
sender=None,
updated_course_overview=updated_course_overview,
previous_start_date=previous_course_overview.start,
)
def _log_start_date_change(previous_course_overview, updated_course_overview): # lint-amnesty, pylint: disable=missing-function-docstring
previous_start_str = 'None'
if previous_course_overview.start is not None:
previous_start_str = previous_course_overview.start.isoformat()
new_start_str = 'None'
if updated_course_overview.start is not None:
new_start_str = updated_course_overview.start.isoformat()
LOG.info('Course start date changed: course={} previous={} new={}'.format(
updated_course_overview.id,
previous_start_str,
new_start_str,
))
def _check_for_pacing_changes(previous_course_overview, updated_course_overview):
if previous_course_overview.self_paced != updated_course_overview.self_paced:
COURSE_PACING_CHANGED.send(
sender=None,
updated_course_overview=updated_course_overview,
previous_self_paced=previous_course_overview.self_paced,
)
def _check_for_cert_availability_date_changes(previous_course_overview, updated_course_overview):
""" Checks if the cert available date has changed and if so, sends a COURSE_CERT_DATE_CHANGE signal"""
if previous_course_overview.certificate_available_date != updated_course_overview.certificate_available_date:
LOG.info(
f"Certificate availability date for {str(updated_course_overview.id)} has changed from " +
f"{previous_course_overview.certificate_available_date} to " +
f"{updated_course_overview.certificate_available_date}. Sending COURSE_CERT_DATE_CHANGE signal."
)
COURSE_CERT_DATE_CHANGE.send_robust(
sender=None,
course_key=updated_course_overview.id,
available_date=updated_course_overview.certificate_available_date
)
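# Sketch of consuming one of the custom signals defined above (the handler
# name is illustrative):
#
# @receiver(COURSE_START_DATE_CHANGED)
# def _on_start_date_change(sender, updated_course_overview,
#                           previous_start_date, **kwargs):
#     ...  # react to the new start date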
|
[
"rafael.luque@osoco.es"
] |
rafael.luque@osoco.es
|
aefb182162c8052fcdf262e269fe29c0d49373e5
|
db6489b122ce1853636b77dc2fee9f3a02ffbf5f
|
/blog/forms.py
|
d93751988f8e0ad9052d6adb9b35db6db3dec3e9
|
[] |
no_license
|
anamife/Blog_one
|
75566115bc3ac71ddaaef4beca8ed4dd637085f9
|
7acc27f0473bee196c1268e5d595fef00653efdb
|
refs/heads/master
| 2022-04-17T03:36:26.083785
| 2020-04-15T12:40:20
| 2020-04-15T12:40:20
| 255,912,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
from django import forms
from .models import Tag, Post, Comment
from django.core.exceptions import ValidationError
class TagForm(forms.ModelForm):
class Meta:
model = Tag
fields = ['title', 'slug']
widgets = {
'title':forms.TextInput(attrs={'class':'form-control'}),
'slug':forms.TextInput(attrs={'class':'form-control'}),
}
def clean_slug(self):
new_slug = self.cleaned_data['slug'].lower()
if new_slug == 'create':
raise ValidationError('Slug may not be "create"')
if Tag.objects.filter(slug__iexact=new_slug).count():
raise ValidationError('Slug must be unique')
return new_slug
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ['title', 'slug', 'body', 'tags']
widgets = {
'title':forms.TextInput(attrs={'class':'form-control'}),
'slug':forms.TextInput(attrs={'class':'form-control'}),
'body':forms.Textarea(attrs={'class':'form-control'}),
'tags':forms.SelectMultiple(attrs={'class':'form-control'}),
}
def clean_slug(self):
new_slug = self.cleaned_data['slug'].lower()
if new_slug == 'create':
raise ValidationError('Slug may not be "create"')
return new_slug
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ['text']
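# View-side sketch (names are illustrative, not part of this app):
#
# form = PostForm(request.POST)
# if form.is_valid():
#     post = form.save()  # clean_slug() has already validated the slug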
|
[
"nastya_feshcenko@mail.ru"
] |
nastya_feshcenko@mail.ru
|