| blob_id (string, 40) | directory_id (string, 40) | path (string, 3-281) | content_id (string, 40) | detected_licenses (list, 0-57) | license_type (2 classes) | repo_name (string, 6-116) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k-668M, nullable) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-38.2k) | gha_license_id (17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (107 classes) | src_encoding (20 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 4-6.02M) | extension (78 classes) | content (string, 2-6.02M) | authors (list, 1) | author (string, 0-175) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a02ba1bdd360a58f588d063d7f1b9793a9d9412d | fb8a82d8cdedf9095b455e6457f312a90f791745 | /10926_놀람.py | 177d862f26381a0c0a9cb380b4377e12673eb15c | [] | no_license | Napol-Napol/Practice-algorithm | 07885a42205e9ff67403feaf6f65e2c40f1b0ab5 | dca90b016cc1e3303e8ee8e42102655f06e256b6 | refs/heads/master | 2023-07-10T01:56:57.041349 | 2023-06-29T04:38:40 | 2023-06-29T04:38:40 | 279,951,280 | 0 | 1 | null | 2023-06-29T04:38:41 | 2020-07-15T18:49:28 | C++ | UTF-8 | Python | false | false | 32 | py |
name = input()
print(name+"??!")
| ["gc9612@naver.com"] | gc9612@naver.com |
c3814fd79b1a1d8c165a84db0088b1cace467d56 | 417e6eb589d3441c3c8b9901e2d35873dd35f097 | /src/structural/observer.py | 0ea844bf4d99f8480fb048987da3a1e944975507 | [] | no_license | vmgabriel/pattern-python | 4fc6127ebdb521d0a4a7b10b4b68880f691ee630 | 74f1cd1314a79060d1df1a6df018c39572bc2b4c | refs/heads/master | 2023-04-24T06:45:16.773415 | 2021-05-10T21:14:51 | 2021-05-10T21:14:51 | 365,394,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py |
"""Observer Pattern"""
# Libraries
from abc import ABCMeta, abstractmethod
class Publisher(metaclass=ABCMeta):
def add_observer(self, observer):
pass
def remove_observer(self, observer):
pass
def notify_all(self):
pass
def write_post(self, text):
pass
class PlatziForum(Publisher):
def __init__(self):
self.users_list = []
self.post = None
def add_observer(self, observer):
if observer not in self.users_list:
self.users_list.append(observer)
def remove_observer(self, observer):
self.users_list.remove(observer)
def notify_all(self):
for observer in self.users_list:
observer.notify(self.post)
def write_post(self, text):
self.post = text
self.notify_all()
class Subscriber:
def notify(self, post):
pass
class UserA(Subscriber):
def __init__(self):
pass
def notify(self, post):
print('User A ha sido notificado - {}'.format(post))
class UserB(Subscriber):
def __init__(self):
pass
def notify(self, post):
print('User B ha sido notificado - {}'.format(post))
if __name__ == '__main__':
foro = PlatziForum()
user1 = UserA()
user2 = UserB()
foro.add_observer(user1)
foro.add_observer(user2)
foro.write_post('Post en Platzi')
|
[
"vmgabriel96@gmail.com"
] |
vmgabriel96@gmail.com
|
04407c4c0c2cfb2ea993f25ad431233d54a4a666
|
8261e5d7497ed5625626c5bb727d58dce6b37f5c
|
/参考项目/生鲜/fresh/fresh/settings.py
|
4c5f03da73032f392d1ad09ef5a504ae9a9be5a9
|
[] |
no_license
|
zhonglimei001/shop
|
d66ebf987d15ddaf6abf262fa8cfc10ceb4c53e3
|
76c7ea8c021ef5fe1f3dfa12fc120a79d1fc7e74
|
refs/heads/master
| 2020-12-04T01:07:50.034728
| 2018-03-08T07:12:53
| 2018-03-08T07:12:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,691
|
py
|
"""
Django settings for fresh project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ysk)dd^p*zwh#8)=okwvpxgmld6vjo_gh5&e#cqas*a*4=zku*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'user',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'fresh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fresh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
[
"794564669@qq.com"
] |
794564669@qq.com
|
154dd920bb3f0c68406acde29d4f1f926d2bcda0
|
a66a95e79a6f99e9324627203965d33bf4825697
|
/hast_gazebo/src/gazebo_logger.py
|
c6a320c61d8d38152188658d68b996b0563d1be0
|
[] |
no_license
|
benjaminabruzzo/idetc2019_code
|
e991bae4f7f94525e8f71509390954e976446fd3
|
e0b196558878bd142c37b72fc1e30bb1665f947e
|
refs/heads/master
| 2020-05-07T09:29:05.929147
| 2019-04-09T13:58:58
| 2019-04-09T13:58:58
| 180,377,895
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,245
|
py
|
#!/usr/bin/env python
# Import required Python code.
import cv2
import roslib
import rospy
import yaml
import tf
import message_filters
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import JointState, Imu
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from nav_msgs.msg import Path
from robot_plugins.msg import pid_controllers
import csv
import itertools


class gazebo_logger():
    # Must have __init__(self) function for a class, similar to a C++ class constructor.
    def __init__(self):
        exp_date = rospy.get_param('~date')
        str_date = str(exp_date)
        print "gazebo_logger:: date:= " + str_date
        exp_run = rospy.get_param('~run')
        str_run = str(exp_run).zfill(3)
        print "gazebo_logger:: run:= " + str_run
        self.file_loggerfilename = ('/home/benjamin/ros/data/csv/hast_{0}_{1}.csv').format(str_date, str_run)
        self.file_logger = open(self.file_loggerfilename, 'w')
        self.world_origin = rospy.get_param('~world_origin')
        xf_list = rospy.get_param('~xf_array')
        xf_names = rospy.get_param('~xf_names')
        self.array_of_xf = xf_list.split(",")
        self.array_of_xf_names = xf_names.split(",")
        metronome_type = rospy.get_param('~metronome_type')
        metronome_topic = rospy.get_param('~metronome_topic')
        self.Frame = 0
        if metronome_type == 'JointState':
            self.metronome_sub = rospy.Subscriber(metronome_topic, JointState, self.metronome_callback)
        elif metronome_type == 'Imu':
            self.metronome_sub = rospy.Subscriber(metronome_topic, Imu, self.metronome_callback)
        self.cfg_loggername = '/home/benjamin/ros/data/csv/hast_cfg_' + str_date + '_' + str_run + '.csv'
        self.cfg_logger = open(self.cfg_loggername, 'w')
        self.cfg_logger.write(str_date + '\n')
        self.cfg_logger.write(str_run + '\n')
        self.make_csv_headers()
        for xf_item in self.array_of_xf:
            print "gazebo_logger::logging xf : " + str(self.world_origin) + " to " + str(xf_item)
            self.cfg_logger.write("gazebo_logger::logging xf : " + str(self.world_origin) + " to " + str(xf_item) + '\n')
        self.xf_listener = tf.TransformListener()
        record_pid = rospy.get_param('~record_pid')
        if record_pid:
            self.pid_filename = '/home/benjamin/ros/data/' + str_date + '/' + str_run + '/pid_' + str_run + '.m'
            self.pid_logger = open(self.pid_filename, 'w')
            self.pid_topic = rospy.get_param('~pid_topic')
            print "gazebo_logger:: pid_logger:= " + self.pid_filename
            print "gazebo_logger:: pid_topic:= " + self.pid_topic
            self.pid_sub = rospy.Subscriber(self.pid_topic, pid_controllers, self.pid_callback)
            self.pid_filealloc = '/home/benjamin/ros/data/' + str_date + '/' + str_run + '/pid_prealloc_' + str_run + '.m'
        else:
            print "gazebo_logger:: not logging gazebo PID "
        while not rospy.is_shutdown():
            rospy.spin()
        self.RUN = str_run
        self.DATE = str_date
        print "self.processVicon()"
        self.processVicon()
        if record_pid:
            self.pid_callalloc()

    def processVicon(self):
        # PATH = sys.argv[1]
        # DATE = sys.argv[2]
        # RUN = sys.argv[3]
        OUTPATH = "/home/benjamin/ros/data/" + self.DATE + "/csv/"
        datafile = self.file_loggerfilename
        print datafile
        f = open(datafile, 'rt')
        datalist = []
        i = 0
        try:
            reader = csv.reader(f)
            for row in reader:
                i += 1
                if i == 3:
                    objects = row
                if i > 5:
                    datalist.append(row)
        finally:
            f.close()
        stripped_objects = filter(None, objects)
        filenames = []
        for x in stripped_objects:
            y = x.split(":")
            # print y[1]
            filenames.append(OUTPATH + y[1] + '_' + self.RUN + ".csv")
        # print filenames
        for x in range(0, len(filenames)):
            f = open(filenames[x], 'wt')
            try:
                writer = csv.writer(f)
                writer.writerow(('Frame', 'RX', 'RY', 'RZ', 'RW', 'TX', 'TY', 'TZ'))
                for i in range(len(datalist) - 1):
                    writer.writerow([datalist[i][0]] + datalist[i][(2 + 7 * x):(9 + 7 * x)])
            finally:
                f.close()

    def pid_callalloc(self):
        print " "
        print "gazebo_logger:: shutting down... "
        print " "
        self.file_logger.close()
        self.pid_logger.close()
        msg_id = self.pid_data.id
        self.pid_alloc = open(self.pid_filealloc, 'w')
        self.pid_alloc.write("%% pid preallo\n\n")
        self.pid_alloc.write("pid.time = zeros(" + str(msg_id) + ",1);\n")
        for ctrl in self.pid_data.controllers:
            self.pid_alloc.write("\npid." + ctrl.name + ".gains.pid = zeros(" + str(msg_id) + ",3);\n")
            self.pid_alloc.write("pid." + ctrl.name + ".errors.pid = zeros(" + str(msg_id) + ",3);\n")
            self.pid_alloc.write("pid." + ctrl.name + ".input = zeros(" + str(msg_id) + ",1);\n")
            self.pid_alloc.write("pid." + ctrl.name + ".state = zeros(" + str(msg_id) + ",1);\n")
            self.pid_alloc.write("pid." + ctrl.name + ".dinput = zeros(" + str(msg_id) + ",1);\n")
            self.pid_alloc.write("pid." + ctrl.name + ".output = zeros(" + str(msg_id) + ",1);\n")
            self.pid_alloc.write("pid." + ctrl.name + ".limit = zeros(" + str(msg_id) + ",1);\n")
            self.pid_alloc.write("pid." + ctrl.name + ".time_constant = zeros(" + str(msg_id) + ",1);\n")
        self.pid_alloc.close()

    def pid_callback(self, data):
        self.pid_data = data
        stamp = data.stamp
        msg_id = data.id
        self.pid_count = msg_id
        self.pid_logger.write("\npid.time(" + str(msg_id) + ",1) = [" + str(stamp) + "];\n")
        for ctrl in data.controllers:
            self.pid_logger.write("pid." + ctrl.name + ".gains.pid(" + str(msg_id) + ",:) = [" + str(ctrl.gains.p) + ", " + str(ctrl.gains.i) + ", " + str(ctrl.gains.d) + "];\n")
            self.pid_logger.write("pid." + ctrl.name + ".errors.pid(" + str(msg_id) + ",:) = [" + str(ctrl.errors.p) + ", " + str(ctrl.errors.i) + ", " + str(ctrl.errors.d) + "];\n")
            self.pid_logger.write("pid." + ctrl.name + ".input(" + str(msg_id) + ",1) = [" + str(ctrl.input) + "];\n")
            self.pid_logger.write("pid." + ctrl.name + ".state(" + str(msg_id) + ",1) = [" + str(ctrl.state) + "];\n")
            self.pid_logger.write("pid." + ctrl.name + ".dinput(" + str(msg_id) + ",1) = [" + str(ctrl.dinput) + "];\n")
            self.pid_logger.write("pid." + ctrl.name + ".output(" + str(msg_id) + ",1) = [" + str(ctrl.output) + "];\n")
            self.pid_logger.write("pid." + ctrl.name + ".limit(" + str(msg_id) + ",1) = [" + str(ctrl.limit) + "];\n")
            self.pid_logger.write("pid." + ctrl.name + ".time_constant(" + str(msg_id) + ",1) = [" + str(ctrl.time_constant) + "];\n")

    def make_csv_headers(self):
        header_line1 = 'Objects'
        header_line2 = '100'
        header_line3 = ',,'
        header_line4 = 'Frame,Sub Frame,'
        header_line5 = ',,'
        for xf in self.array_of_xf_names:
            header_line3 += 'Global Angle (Quaternion) ' + xf + ':' + xf + ',,,,,,,'
            header_line4 += 'RX,RY,RZ,RW,TX,TY,TZ,'
            header_line5 += ',,,,mm,mm,mm,'
        self.file_logger.write(header_line1 + '\n')
        self.file_logger.write(header_line2 + '\n')
        self.file_logger.write(header_line3 + '\n')
        self.file_logger.write(header_line4 + '\n')
        self.file_logger.write(header_line5 + '\n')

    def metronome_callback(self, data):
        # using the metronome topic, record all xfs
        stamp = data.header.stamp
        time = stamp.to_sec()
        time_f = '%.4f' % time
        self.Frame += 1
        data_line = str(self.Frame) + ',0,'
        for xf in self.array_of_xf:
            # compute transform
            # print "gazebo_logger::logging xf : " + str(self.world_origin) + " to " + str(xf)
            try:
                (xf_t, xf_x) = self.xf_listener.lookupTransform(self.world_origin, xf, rospy.Time(0))
            except:
                xf_t = [0, 0, 0]
                xf_x = [0, 0, 0, 0]
                # print "gazebo_logger::except on " + xf
            # print "gazebo_logger::logging " + xf + " as: " + str(xf_x[0]) + ',' + str(xf_x[1]) + ',' + str(xf_x[2]) + ',' + str(xf_x[3]) + ',' + str(xf_t[0]*1000) + ',' + str(xf_t[1]*1000) + ',' + str(xf_t[2]*1000) + ','
            # write data
            data_line += str(xf_x[0]) + ','
            data_line += str(xf_x[1]) + ','
            data_line += str(xf_x[2]) + ','
            data_line += str(xf_x[3]) + ','
            # xf_t * 1000 converts m to mm
            data_line += str(xf_t[0] * 1000) + ','
            data_line += str(xf_t[1] * 1000) + ','
            data_line += str(xf_t[2] * 1000) + ','
        self.file_logger.write(data_line + '\n')


if __name__ == '__main__':
    # Initialize the node and name it.
    rospy.init_node('gazebo_logger')
    # Go to class functions that do all the heavy lifting. Do error checking.
    try:
        log = gazebo_logger()
    except rospy.ROSInterruptException:
        pass
| ["abruzzo2@gmail.com"] | abruzzo2@gmail.com |
cf80066c8c1ffe7e982e03699686c97fbdc57858 | 4f5142b5aef2d854110e38eaf26900109b82e9e2 | /try_11_for insert data and create collection.py | 2d15b98552ef745a0617ccebc0b6994bd80ae8b8 | [] | no_license | lll-Mike-lll/botstock | baa24dac67b4af4b17f24ce5f8f7bd4f44fb9f78 | b1b168f9e8ef9d63a97369760d29ea3d0b7544fd | refs/heads/master | 2020-06-10T07:17:58.743776 | 2019-07-31T08:51:45 | 2019-07-31T08:51:45 | 193,616,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 08:21:07 2019

@author: Lenovo
"""
import pymongo as pm

cli = pm.MongoClient()
db = cli['stock']
for i in range(6):
    coll = db['list' + str(i)]
    data = {'no': i + 10}
    coll.insert_one(data)
| ["wuttinun.code@gmail.com"] | wuttinun.code@gmail.com |
41b415f39a4e7aa21b0dd6f22df937506a667106 | 6ab851422ff96d236d054a34e90b321985adc307 | /2126.py | 54c5169ffd27308425636c9f18159af50c4378fe | [] | no_license | glaucodasilva/PythonURIJudge | 65712ef6d5e703ae1c6a5e8ee21c8e51a1e17d93 | a3411ffd7921a84ac173411eaf38081cd0185459 | refs/heads/master | 2020-04-08T14:09:50.252637 | 2019-01-13T03:22:09 | 2019-01-13T03:22:09 | 159,424,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py |
caso = 0
while True:
    try:
        n1 = input()
        tn1 = len(n1)
        n2 = input()
        caso += 1
        sub = 0
        for i in range(len(n2)):
            if n2[i:i+tn1] == n1:
                sub += 1
                pos = i + 1
        if sub > 0:
            print('Caso #%d:' % caso)
            print('Qtd.Subsequencias: %d' % sub)
            print('Pos: %d' % pos)
            print('')
        else:
            print('Caso #%d:' % caso)
            print('Nao existe subsequencia')
            print('')
    except EOFError:
        break
| ["glaucodasilva@live.com"] | glaucodasilva@live.com |
73bb5d61351977675a9d0a5e2f4b1565d14ad573 | 60d6fa9b911f246560835c5c30da62afdb3fbc11 | /station_statisticsV3.py | aeb927d68d6aacfce98a3d844d4b628b914bcc88 | [] | no_license | MichaelLee826/TS_data_analysis | 2d09f12cdbaf688851b397bdcda1df15acd9abdb | 6d9da7f558b560b592ee143f1a4784e719673ec6 | refs/heads/master | 2020-05-13T19:07:09.726931 | 2019-04-24T03:14:05 | 2019-04-24T03:14:05 | 181,651,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py |
import os
import csv


def process():
    # Output files (one CSV per platform)
    station_dict = {"3": "东方绿洲站下行站台", "4": "东方绿洲站上行站台", "8": "朱家角站下行站台", "9": "朱家角站上行站台",
                    "12": "淀山湖大道站下行站台", "13": "淀山湖大道站上行站台", "20": "赵巷站下行站台", "22": "徐泾北城站下行站台",
                    "23": "徐泾北城站上行站台", "26": "诸光路站上行站台", "29": "虹桥火车站下行站台", "32": "虹桥火车站上行站台",
                    "33": "漕盈路站下行站台", "34": "青浦新城站下行站台", "35": "汇金路站下行站台", "36": "嘉松中路站下行站台",
                    "37": "徐盈路站下行站台", "38": "蟠龙路站下行站台", "39": "诸光路站下行站台", "40": "漕盈路站上行站台",
                    "41": "青浦新城站上行站台", "42": "汇金路站上行站台", "43": "赵巷站上行站台", "44": "嘉松中路站上行站台",
                    "45": "徐盈路站上行站台", "46": "蟠龙路站上行站台"}
    station_names = list(station_dict.values())
    station_files = [file for file in range(station_dict.__len__())]
    station_writers = [writer for writer in range(station_dict.__len__())]
    for i in range(station_dict.__len__()):
        station_files[i] = open("F:\\上海地铁原始数据\\站台统计数据_abc" + "\\" + station_names[i] + ".csv", 'a', newline='')
        station_writers[i] = csv.writer(station_files[i], dialect='excel')
    # Input files
    base_path = "F:\\上海地铁原始数据\\列车合并数据结果"
    file_list = os.listdir(base_path)
    for file in file_list:
        file_path = os.path.join(base_path, file)
        print(file_path)
        csv_file = open(file_path, 'r', encoding='utf-8')
        train_no = os.path.basename(file)[4:8]
        flag = False
        line = ''
        index = -1
        for one_line in csv_file:
            # Prepend the train number
            item = (train_no + "," + one_line).strip().split(',')
            station_code = item[38]
            if station_code == "CY1" or station_code == "0":
                if flag is False:
                    continue
                else:
                    station_writers[index].writerow(line)
                    flag = False
            elif station_code in station_dict.keys():
                flag = True
                line = item
                station_name = station_dict.get(station_code)
                index = station_names.index(station_name)
        csv_file.close()
    for i in range(station_dict.__len__()):
        station_files[i].close()


if __name__ == '__main__':
    process()
| ["michaelleef2008@gmail.com"] | michaelleef2008@gmail.com |
2281dc7d58f87cd50fac735a74771b84d58ba8c3 | 55a311eb02403f8fa7e4d1e148e9bb5e80619984 | /businessLogic/methods/updateEntryQuery.py | c4c91ca1c79619937408f6a8dbd731469235be5e | ["Apache-2.0"] | permissive | kethan-kumar/Custom_Database | f5f3e5478a6b4c66f88fde5305f822ebacdb6ca2 | 0d44ab8748631b806b3711b8ae7d68e5f6474b1d | refs/heads/main | 2023-05-07T15:12:27.703962 | 2021-05-12T11:37:35 | 2021-05-12T11:37:35 | 365,336,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py |
import pandas as pd
from businessLogic.methods import ConcurrencyControl

global filepath, filepath_slash
filepath = "C:\\Users\\kethan\\Documents\\databases"
filepath_slash = "\\"


def updateValuesinTable(query, path, database, tableName):
    con_status = False
    try:
        con_status = ConcurrencyControl.concurrency_control(database, tableName, True)
        if con_status == True:
            pathTable = path + filepath_slash + tableName
            df = pd.read_csv(pathTable)
            attributeToUpdate = query[0]['attributeN'][0]['attribute']
            valueToInsert = query[0]['attributeN'][0]['value']
            whereAttribute = query[0]['whereAttribute']
            whereValue = query[0]['whereVariable']
            indexList = df.index[df[whereAttribute] == whereValue].tolist()
            for i in indexList:
                df.at[i, attributeToUpdate] = valueToInsert
            df.to_csv(pathTable, index=False)
    finally:
        if con_status == True:
            ConcurrencyControl.concurrency_control(database, tableName, False)
    return 0
| ["kethankumar.nasapu@gmail.com"] | kethankumar.nasapu@gmail.com |
62ee73f02dacd7db1d18db9daf85fad9474cc82a | c7f08cadd13b6a08dcbb279189ffbc3155e9cd14 | /models/base_joint_trans.py | 6933ee6398bc3bd20b229a736c891e28268afbb6 | [] | no_license | pranavpawar3/dialog-nlu | 8172c65456d792dc1a963affade63c37fd8f2d2a | c5fd9a6f9a425d443e4b22af5709af43115e772d | refs/heads/master | 2022-12-11T22:45:23.844852 | 2020-09-03T10:35:42 | 2020-09-03T10:35:42 | 292,539,199 | 0 | 0 | null | 2020-09-03T10:34:34 | 2020-09-03T10:34:34 | null | UTF-8 | Python | false | false | 4,868 | py |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 09:22:32 2020

@author: mwahdan
"""

import tensorflow as tf
from models.nlu_model import NLUModel
import numpy as np
import os
import json
from .callbacks import F1Metrics


class BaseJointTransformerModel(NLUModel):

    def __init__(self, config, trans_model=None, is_load=False):
        self.slots_num = config.get('slots_num')
        self.intents_num = config.get('intents_num')
        self.pretrained_model_name_or_path = config.get('pretrained_model_name_or_path')
        self.cache_dir = config.get('cache_dir', None)
        self.from_pt = config.get('from_pt', False)
        self.num_bert_fine_tune_layers = config.get('num_bert_fine_tune_layers', 10)
        self.intent_loss_weight = config.get('intent_loss_weight', 1.0)
        self.slots_loss_weight = config.get('slots_loss_weight', 3.0)
        self.model_params = config
        if not is_load:
            self.trans_model = trans_model
            self.build_model()
            self.compile_model()

    def compile_model(self):
        # Instead of using `categorical_crossentropy`,
        # we use `sparse_categorical_crossentropy`, which does expect integer targets.
        optimizer = tf.keras.optimizers.Adam(lr=5e-5)  # 0.001
        losses = {
            'slots_tagger': 'sparse_categorical_crossentropy',
            'intent_classifier': 'sparse_categorical_crossentropy',
        }
        loss_weights = {'slots_tagger': self.slots_loss_weight, 'intent_classifier': self.intent_loss_weight}
        metrics = {'intent_classifier': 'acc'}
        self.model.compile(optimizer=optimizer, loss=losses, loss_weights=loss_weights, metrics=metrics)
        self.model.summary()

    def build_model(self):
        raise NotImplementedError()

    def save(self, model_path):
        raise NotImplementedError()

    def load(load_folder_path):
        raise NotImplementedError()

    def fit(self, X, Y, validation_data=None, epochs=5, batch_size=32,
            id2label=None):
        """
        X: batch of [input_ids, input_mask, segment_ids, valid_positions]
        """
        X = (X[0], X[1], X[2], self.prepare_valid_positions(X[3]))
        if validation_data is not None:
            X_val, Y_val = validation_data
            validation_data = ((X_val[0], X_val[1], X_val[2], self.prepare_valid_positions(X_val[3])), Y_val)
        callbacks = [F1Metrics(id2label, validation_data=validation_data)]
        history = self.model.fit(X, Y, validation_data=validation_data,
                                 epochs=epochs, batch_size=batch_size,
                                 callbacks=callbacks)
        self.visualize_metric(history.history, 'slots_tagger_loss')
        self.visualize_metric(history.history, 'intent_classifier_loss')
        self.visualize_metric(history.history, 'loss')
        self.visualize_metric(history.history, 'intent_classifier_acc')

    def prepare_valid_positions(self, in_valid_positions):
        in_valid_positions = np.expand_dims(in_valid_positions, axis=2)
        in_valid_positions = np.tile(in_valid_positions, (1, 1, self.slots_num))
        return in_valid_positions

    def predict_slots_intent(self, x, slots_vectorizer, intent_vectorizer, remove_start_end=True,
                             include_intent_prob=False):
        valid_positions = x[3]
        x = (x[0], x[1], x[2], self.prepare_valid_positions(valid_positions))
        y_slots, y_intent = self.predict(x)
        slots = slots_vectorizer.inverse_transform(y_slots, valid_positions)
        if remove_start_end:
            slots = [x[1:-1] for x in slots]
        if not include_intent_prob:
            intents = np.array([intent_vectorizer.inverse_transform([np.argmax(i)])[0] for i in y_intent])
        else:
            intents = np.array([(intent_vectorizer.inverse_transform([np.argmax(i)])[0], round(float(np.max(i)), 4)) for i in y_intent])
        return slots, intents

    def save_to_path(self, model_path, trans_model_name):
        self.model_params["class"] = self.__class__.__name__
        with open(os.path.join(model_path, 'params.json'), 'w') as json_file:
            json.dump(self.model_params, json_file)
        self.model.save(os.path.join(model_path, trans_model_name))

    def load_model_by_class(klazz, load_folder_path, trans_model_name):
        with open(os.path.join(load_folder_path, 'params.json'), 'r') as json_file:
            model_params = json.load(json_file)
        new_model = klazz(model_params, trans_model=None, is_load=True)
        new_model.model = tf.keras.models.load_model(os.path.join(load_folder_path, trans_model_name))
        return new_model
| ["mahmoud.a.wahdan@gmail.com"] | mahmoud.a.wahdan@gmail.com |
02c6cf6316d9fba40d964e94fe32779ecdb147ba | d5a3aa96b30a5a6a355b4e004e494a6ef41a339c | /dataviz/flagsuscities.py | 8d13d262bdf8130e129cc5277372acdda3014e45 | ["MIT"] | permissive | Udzu/pudzu | 4c1c134503f62fd1cc08a56e257b864033b38561 | df5019802bc32064870f31cda8397ad14868cda0 | refs/heads/master | 2023-07-10T06:16:35.342990 | 2023-07-04T06:28:00 | 2023-07-04T06:28:00 | 97,936,607 | 120 | 28 | MIT | 2021-02-21T16:15:31 | 2017-07-21T10:34:16 | Roff | UTF-8 | Python | false | false | 1,598 | py |
from pudzu.charts import *

df = pd.read_csv("datasets/flagsuscities.csv")
groups = list(remove_duplicates(df.group))
array = [[dict(r) for _, r in df.iterrows() if r.group == g] for g in groups]
data = pd.DataFrame(array, index=list(remove_duplicates(df.group)))

FONT = calibri
fg, bg = "black", "#EEEEEE"
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"

def process(d):
    if not d: return None
    description = get_non(d, 'description')
    description = "{}".format(description) if description else " "
    flag = Image.from_url_with_cache(get_non(d, 'image', default_img)).to_rgba()
    flag = flag.resize_fixed_aspect(height=198) if flag.width / flag.height < 1.3 else flag.resize((318, 198))
    flag = flag.pad(1, "grey")
    return Image.from_column([
        Image.from_text(d['name'], FONT(32, bold=True), beard_line=True, fg=fg),
        Image.from_text(description, FONT(24, italics=True), fg=fg),
        flag
    ], padding=2, bg=bg, equal_widths=True)

title = Image.from_text("Selected US city flags".upper(), FONT(80, bold=True), fg=fg, bg=bg).pad(40, bg)
grid = grid_chart(data, process, padding=(10, 20), fg=fg, bg=bg, yalign=(0.5, 0.5, 0.5), row_label=lambda r: None if data.index[r].startswith("_") else Image.from_text(data.index[r].replace(r"\n", "\n").upper(), FONT(32, bold=True), align="center"))

img = Image.from_column([title, grid, Rectangle((0, 40))], bg=bg)
img.place(Image.from_text("/u/Udzu", FONT(24), fg=fg, bg=bg, padding=5).pad((1, 1, 0, 0), fg), align=1, padding=5, copy=False)
img.save("output/flagsuscities.png")
| ["uri.zarfaty@gmail.com"] | uri.zarfaty@gmail.com |
19671f3686631379163ed2b10f584200efc261bf | 9403a2dc1ae4a15a10466aa929af403b94426f8d | /10-进程和线程/test.py | 8314a540c720001679ebc5c9d11623cf44336f98 | [] | no_license | IceBlueX/py | 8efaed3805eaeea0e1909ebd416f22f75fa2fa86 | 0cfcbfcf30f6d8c0da41a9ebbc3c92b06e821666 | refs/heads/master | 2021-06-24T21:04:24.118942 | 2019-08-06T06:15:38 | 2019-08-06T06:15:38 | 159,289,022 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py |
from multiprocessing import Process, Queue
import os, time, random


def write(q):
    print('Process to write: %s' % os.getpid())
    for value in ['A', 'B', 'C', 'D', 'E']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())


def read(q):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Get %s from queue' % value)


if __name__ == '__main__':
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    pw.start()
    pr.start()
    pw.join()
    pr.terminate()
| ["baokun.li@orgneering.com"] | baokun.li@orgneering.com |
8aa8e5ab07a85782e717ab582bad1cf7628c4502 | 2779605165853981c961db1ad9f1d2faa8b4e709 | /includes/wordforms.py | 7675f681c2b8151dc7336da7f3095a58ddab4bf5 | ["Apache-2.0"] | permissive | lebedevsergey/poet-ex-machina | 5de7a747818d4db2a8968d011fd20fc39973a590 | 94e94dcce06be027248f9071d995659be200d827 | refs/heads/master | 2020-05-27T21:22:08.698749 | 2019-09-22T16:23:02 | 2019-09-22T16:23:02 | 83,671,293 | 42 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py |
# -*- coding: utf-8 -*-
# Copyright 2016 Sergey Lebedev
# Licensed under the Apache License, Version 2.0

import re


class wordFormsDict:
    WORD_FORMS_DATA_DIVIDER = ','
    WORD_FORMS_FILE = "dictonaries/odict.csv"
    wordForms = {}
    wordMainForms = {}

    def __init__(self):
        self.__loadDict(self.WORD_FORMS_FILE)

    def findWordForm(self, wordForm):
        if wordForm in self.wordForms:
            return self.wordForms[wordForm]
        return None

    def __loadDict(self, filename):
        with open(filename, 'r', encoding='cp1251') as f:
            count = 0
            while True:
                line = f.readline()
                line = line.strip()
                if not line:
                    break
                data = re.split(self.WORD_FORMS_DATA_DIVIDER, line)
                self.wordMainForms[count] = data[0]
                mainForm = self.wordMainForms[count]
                count = count + 1
                self.wordForms[data[0]] = mainForm
                if len(data) <= 2:
                    continue
                for i in range(2, len(data)):
                    self.wordForms[data[i]] = mainForm
| ["diacomltd@mail.ru"] | diacomltd@mail.ru |
c1c819095b0fa0a1b0f9af06e428c396c3e6f389 | 834eec52f4f75f881b957efcce5aa2ac987d28cd | /HTTP API.py | 242f79523ec7776309c175f408c31a80dc0039c3 | [] | no_license | kenigteh/KODE | 15d42a25f443e3c652cf435285e008265b01b905 | 660c321b0d3131362bc2a168e365262e7c5dc773 | refs/heads/master | 2020-04-27T02:59:21.665558 | 2019-03-11T21:57:00 | 2019-03-11T21:57:00 | 174,011,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,622 | py |
import traceback
import DataBase
import threading
from time import sleep
import requests
import flask
from flask import request
from flask_mail import Mail, Message

app = flask.Flask(__name__)

# Administrator mail credentials
my_email = ""
password = ""


# Check whether a string parses as a float
def check_float(number):
    try:
        float(number)
        return True
    except:
        return False


# Endpoint for subscribing to price changes
@app.route('/subscription', methods=["POST"])
def add_ticker():
    if "email" in request.form and "ticker" in request.form and (
            "max_price" in request.form or "min_price" in request.form):
        email = request.form["email"]
        ticker = request.form["ticker"]
        max_price = float(request.form["max_price"]) if "max_price" in request.form and check_float(
            request.form["max_price"]) else 10 ** 12
        min_price = float(request.form["min_price"]) if "min_price" in request.form and check_float(
            request.form["min_price"]) else 0
        if max_price or min_price:
            if email not in DataBase.users:
                DataBase.users[email] = {}
            if len(DataBase.users[email]) < 5:
                DataBase.users[email][ticker] = {"max_price": max_price,
                                                 "min_price": min_price}
                if ticker not in DataBase.help_table:
                    DataBase.help_table[ticker] = []
                if email not in DataBase.help_table[ticker]:
                    DataBase.help_table[ticker].append(email)
                print(DataBase.users[email])
                return "Succes"
            else:
                return "Error! You haw max subscribes now!"
    else:
        return "Several arguments lost"


# Endpoint for removing subscriptions
@app.route('/subscription', methods=["DELETE"])
def del_ticker():
    if "email" in request.form:
        email = request.form["email"]
        if email in DataBase.users:
            # If a ticker is given, remove just that ticker
            if "ticker" in request.form:
                if request.form["ticker"] in DataBase.users[email]:
                    DataBase.users[email].pop(request.form["ticker"])
                    DataBase.help_table[request.form["ticker"]].remove(email)
                else:
                    return "У вас не было такого тикета"
            # Otherwise remove everything
            else:
                for key in DataBase.users[email]:
                    DataBase.help_table[key].remove(email)
                DataBase.users[email] = {}
            return "Удаление прошло успешно"
        else:
            return "Пользователь не найден"
    else:
        return "Вы забыли указать email"


# Fetch a quote from the API
def get_cource(symbol):
    try:
        data = requests.get(
            f"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={symbol}&apikey=S7KY9FML01GT9X2W").json()
        return data
    except:
        return {"Global Quote": 0}


"""
status
0 - above the target value
1 - below the target value
"""


# Helper for sending notification emails
def send_email(email, symbol, status):
    word = ["Больше нужного значения", "Меньше нужного значения"]
    text = f"{symbol} {word[status]}"
    try:
        mail = Mail(app)
        with app.app_context():
            msg = Message(subject="Изменение котировок",
                          sender=app.config.get("MAIL_USERNAME"),
                          recipients=[email],  # replace with your email for testing
                          body=text)
            mail.send(msg)
    except:
        print('Something went wrong...')
        traceback.print_exc()


# Background worker that checks the quotes
def send_tickers():
    while True:
        # Iterate over all tracked tickers
        for symbol in list(DataBase.help_table.keys())[::-1]:
            # Users who are watching this ticker
            users = DataBase.help_table[symbol]
            # Quote returned by the API
            cource = get_cource(symbol)
            if "Global Quote" in cource and cource["Global Quote"] and "02. open" in cource["Global Quote"]:
                # Extract the price from the response dict
                price = float(cource["Global Quote"]["02. open"])
                # Collect users who no longer need notifications for this ticker
                user_for_del = []
                for user_email in users:
                    # Extra check
                    if symbol in DataBase.users[user_email]:
                        if price > DataBase.users[user_email][symbol]["max_price"]:
                            send_email(user_email, symbol, 0)
                            user_for_del.append(user_email)
                        elif price < DataBase.users[user_email][symbol]["min_price"]:
                            send_email(user_email, symbol, 1)
                            user_for_del.append(user_email)
                # Drop the users that have just been notified
                DataBase.help_table[symbol] = list(set(users) - set(user_for_del))
                if not DataBase.help_table[symbol]:
                    DataBase.help_table.pop(symbol)
        sleep(10)


if __name__ == '__main__':
    # Load the mail credentials
    my_email = DataBase.my_email
    password = DataBase.password
    # Configure the SMTP settings
    mail_settings = {
        "MAIL_SERVER": 'smtp.gmail.com',
        "MAIL_PORT": 465,
        "MAIL_USE_TLS": False,
        "MAIL_USE_SSL": True,
        "MAIL_USERNAME": my_email,
        "MAIL_PASSWORD": password
    }
    app.config.update(mail_settings)
    # Start a separate thread that polls the quotes and sends the emails
    t1 = threading.Thread(target=send_tickers)
    t1.start()
    app.run(host='0.0.0.0', port=4567)
|
[
"34108059+buispro@users.noreply.github.com"
] |
34108059+buispro@users.noreply.github.com
|
140ae757e445ed1d7b00ea50be8cec6572a71591
|
ec6e5746d81c4cda00640e35c302435e3aa6ea54
|
/download.py
|
22b086704eefdf05147490a997f82dd02d611d23
|
[] |
no_license
|
tinyhhj/began
|
cdb437fd98e18556621b93a37db6c91a33eeba2d
|
e12e4330b4e425aa43fe454dc62a53935eb6e635
|
refs/heads/master
| 2021-02-13T14:21:15.732736
| 2020-04-01T08:38:07
| 2020-04-01T08:38:07
| 244,703,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,627
|
py
|
"""
Modification of
- https://github.com/carpedm20/DCGAN-tensorflow/blob/master/download.py
- http://stackoverflow.com/a/39225039
"""
from __future__ import print_function
import os
import zipfile
import requests
from tqdm import tqdm
import argparse
import time
def download_file_from_google_drive_by_id(id, destination):
URL = 'https://docs.google.com/uc?export=download'
session = requests.Session()
response = session.get(URL, params={'id':id}, stream=True)
token = get_confirm_token(response)
if token:
print(f'[!] token exists {token}')
params = {'id':id, 'confirm':token}
response = session.get(URL, params= params, stream=True)
save_response_content(response, destination)
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={ 'id': id }, stream=True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination, chunk_size=32*1024):
total_size = int(response.headers.get('content-length', 0))
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(chunk_size), total=total_size,
unit='B', unit_scale=True, desc=destination):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def unzip(filepath):
print("Extracting: " + filepath)
base_path = os.path.dirname(filepath)
with zipfile.ZipFile(filepath) as zf:
zf.extractall(base_path)
os.remove(filepath)
def download_from_drive(basepath, filename, drive_id):
assert len(os.path.splitext(filename)) == 2 , f'filename doesnt have ext {filename}'
assert os.path.exists(basepath), f'basepath doesnt exists {basepath}'
dirname, _ = os.path.splitext(filename)
data_path = os.path.join(basepath, dirname)
os.makedirs(data_path, exist_ok=True)
save_path = os.path.join(basepath, filename)
if os.path.exists(save_path):
print(f'{save_path} exists skip download ')
else:
download_file_from_google_drive_by_id(drive_id, save_path)
if os.path.exists(data_path):
print(f'{data_path} exists skip unzip file ')
else:
time.sleep(5)
with zipfile.ZipFile(save_path) as zf:
zf.extractall(basepath)
# move dir to data_path
extract_dir = list(filter(os.path.isdir, [os.path.join(basepath,f) for f in os.listdir(basepath)]))
extract_dir.sort(key= lambda x:os.path.getctime(x), reverse=True)
if extract_dir[0] != data_path:
os.rename(extract_dir[0], data_path)
# os.remove(save_path)
def download_celeb_a(base_path):
data_path = os.path.join(base_path, 'CelebA')
images_path = os.path.join(data_path, 'images')
if os.path.exists(data_path):
print('[!] Found Celeb-A - skip')
return
filename, drive_id = "img_align_celeba.zip", "0B7EVK8r0v71pZjFTYXZWM3FlRnM"
save_path = os.path.join(base_path, filename)
if os.path.exists(save_path):
print('[*] {} already exists'.format(save_path))
else:
download_file_from_google_drive(drive_id, save_path)
zip_dir = ''
with zipfile.ZipFile(save_path) as zf:
zip_dir = zf.namelist()[0]
zf.extractall(base_path)
if not os.path.exists(data_path):
os.mkdir(data_path)
os.rename(os.path.join(base_path, "img_align_celeba"), images_path)
os.remove(save_path)
def prepare_data_dir(path = './data'):
if not os.path.exists(path):
os.mkdir(path)
# check, if file exists, make link
def check_link(in_dir, basename, out_dir):
in_file = os.path.join(in_dir, basename)
if os.path.exists(in_file):
link_file = os.path.join(out_dir, basename)
rel_link = os.path.relpath(in_file, out_dir)
os.symlink(rel_link, link_file)
def add_splits(base_path,dir_name):
data_path = os.path.join(base_path, dir_name)
# images_path = os.path.join(data_path, 'images')
train_dir = os.path.join(data_path, 'train','class')
valid_dir = os.path.join(data_path, 'valid','class')
test_dir = os.path.join(data_path, 'test','class')
if not os.path.exists(train_dir):
os.makedirs(train_dir)
if not os.path.exists(valid_dir):
os.makedirs(valid_dir)
if not os.path.exists(test_dir):
os.makedirs(test_dir)
# these constants based on the standard CelebA splits
NUM_EXAMPLES = 202599
TRAIN_STOP = 162770
VALID_STOP = 182637
for i in range(0, TRAIN_STOP):
basename = "{:06d}.jpg".format(i+1)
check_link(data_path, basename, train_dir)
for i in range(TRAIN_STOP, VALID_STOP):
basename = "{:06d}.jpg".format(i+1)
check_link(data_path, basename, valid_dir)
for i in range(VALID_STOP, NUM_EXAMPLES):
basename = "{:06d}.jpg".format(i+1)
check_link(data_path, basename, test_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path',type=str, default='data')
args = parser.parse_args()
prepare_data_dir(args.path)
download_from_drive(args.path, 'img_align_celeba.zip','0B7EVK8r0v71pZjFTYXZWM3FlRnM')
add_splits(args.path, 'img_align_celeba')
| ["krwsim@gmail.com"] | krwsim@gmail.com |
46b0bb07d7705bd5f2793a879f995b8b719136d5 | cc4640f3540250ba1ec749e989c7044270ade09e | /Orders/models.py | ee5aa9ccacf4d1ae6c01b18625822bd7197ddcd6 | [] | no_license | nusk003/royal_marketing_api | aa86d0ca40067c2a847080d06acf975eca8827bc | e311bc0bfddfd4aa3aa4f656c94c7b76ca2446ec | refs/heads/master | 2022-12-14T03:03:29.572632 | 2019-04-02T02:18:56 | 2019-04-02T02:18:56 | 178,981,569 | 0 | 0 | null | 2021-09-08T00:55:45 | 2019-04-02T02:16:31 | Python | UTF-8 | Python | false | false | 6,033 | py |
from django.db import models
from django.db.models import Sum, Count, Avg, F
from User.models import (User, PromoCodes)
from Products.models import (
    ProductCombinations,
    ProductVendor, OfferProductVendors)
from optimized_image.fields import OptimizedImageField


# Create your models here.
class PaymentType(models.Model):
    paymentTypeId = models.AutoField(primary_key=True)
    paymentType = models.CharField(max_length=20)
    deletePaymentType = models.BooleanField(default=False)


class ExpressCheckout(models.Model):
    expressCheckoutId = models.AutoField(primary_key=True)
    customerId = models.ForeignKey(User, on_delete=models.CASCADE, related_name="ExpressCheckouts")
    image = OptimizedImageField(upload_to="ExpressCheckouts/")
    invoiceNo = models.CharField(max_length=20)
    orderStatus = models.IntegerField()
    deleteExpress = models.BooleanField(default=False)


class Orders(models.Model):
    orderId = models.AutoField(primary_key=True)
    customerId = models.ForeignKey(User, on_delete=models.CASCADE, related_name="Orders")
    invoiceNo = models.CharField(max_length=12)
    isExpressCheckout = models.BooleanField(default=False)
    expressCheckoutId = models.ForeignKey(ExpressCheckout, on_delete=models.CASCADE, blank=True, null=True)
    orderDate = models.DateTimeField(auto_now=True)
    orderStatus = models.IntegerField()
    deliverAddress = models.CharField(max_length=50, default="Main Road")
    promoCodeId = models.ForeignKey(PromoCodes, on_delete=models.CASCADE, related_name="Orders", blank=True, null=True)
    # dueSellPrice = models.DecimalField(max_digits=10, decimal_places=2, default=200.00)
    # dueCostPrice = models.DecimalField(max_digits=10, decimal_places=2, default=200.00)
    discountPrice = models.DecimalField(max_digits=10, decimal_places=2, default=200.00)
    paymentType = models.ForeignKey(PaymentType, on_delete=models.CASCADE, related_name="Orders", default=1)
    deleteOrder = models.BooleanField(default=False)
    cancelReason = models.CharField(max_length=100, blank=True, null=True)

    def get_totalSellPrice(self):
        try:
            ops = OrderProducts.objects.filter(orderId=self, deleteOrderProduct=False, isCancel=False)
            totalPrice = 0
            for op in ops:
                if op.discountPrice is not None:
                    totalPrice += (op.discountPrice * op.qty)
                else:
                    totalPrice += (op.sellPrice * op.qty)
            return totalPrice
        except:
            return 0

    def get_totalCostPrice(self):
        total = OrderProducts.objects.filter(orderId=self, deleteOrderProduct=False, isCancel=False).annotate(totalPrice=Sum(F('costPrice') * F('qty'), output_field=models.DecimalField(max_digits=10, decimal_places=2)))
        return total.first().totalPrice


class OrderProducts(models.Model):
    orderProductId = models.AutoField(primary_key=True)
    orderId = models.ForeignKey(Orders, on_delete=models.CASCADE, related_name="OrderProducts")
    proVendorId = models.ForeignKey(ProductVendor, on_delete=models.CASCADE, related_name="ProductVendors")
    qty = models.PositiveIntegerField()
    costPrice = models.DecimalField(max_digits=7, decimal_places=2)
    sellPrice = models.DecimalField(max_digits=7, decimal_places=2)
    isCancel = models.BooleanField(default=False)
    discountPrice = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
    offerProductVendorId = models.ForeignKey(OfferProductVendors, on_delete=models.CASCADE, related_name="OrderProducts", blank=True, null=True)
    deleteOrderProduct = models.BooleanField(default=False)
    cancelReason = models.CharField(max_length=100, blank=True, null=True)


class Delivery(models.Model):
    deleveryId = models.AutoField(primary_key=True)
    orderId = models.ForeignKey(Orders, on_delete=models.CASCADE, related_name="Delivery")
    riderId = models.ForeignKey(User, on_delete=models.CASCADE, related_name="Deliveries")
    dispatchedDate = models.DateTimeField()
    deliveredDate = models.DateTimeField()
    note = models.CharField(max_length=500)
    deleteDelivery = models.BooleanField(default=False)


class ReturnProducts(models.Model):
    returnId = models.AutoField(primary_key=True)
    orderId = models.ForeignKey(Orders, on_delete=models.CASCADE, related_name="ReturnProducts")
    productVendorId = models.ForeignKey(ProductVendor, on_delete=models.CASCADE, related_name="Returns")
    qty = models.PositiveIntegerField()
    reason = models.CharField(max_length=500)
    deleteReturn = models.BooleanField(default=False)


class ComplaintType(models.Model):
    complaintTypeId = models.AutoField(primary_key=True)
    complaintType = models.CharField(max_length=20)
    deleteType = models.BooleanField(default=False)


class Complaints(models.Model):
    complaintId = models.AutoField(primary_key=True)
    customerId = models.ForeignKey(User, on_delete=models.CASCADE, related_name="Complaints")
    complaintType = models.ForeignKey(ComplaintType, on_delete=models.CASCADE, related_name="Complaints")
    complaintBody = models.CharField(max_length=500)
    complaintStatus = models.IntegerField()
    complaintDate = models.DateTimeField(auto_now=True)
    deleteComplaint = models.BooleanField(default=False)


class RequestType(models.Model):
    requestTypeID = models.AutoField(primary_key=True)
    requestType = models.CharField(max_length=20)
    deleteRequestType = models.BooleanField(default=False)


class Requests(models.Model):
    requestId = models.AutoField(primary_key=True)
    customerId = models.ForeignKey(User, on_delete=models.CASCADE, related_name="Requests")
    requestType = models.ForeignKey(RequestType, on_delete=models.CASCADE, related_name="Requests")
    requestBody = models.CharField(max_length=500)
    requestStatus = models.IntegerField()
    requestDate = models.DateTimeField(auto_now=True)
    deleteRequest = models.BooleanField(default=False)
| ["nusk003@gmail.com"] | nusk003@gmail.com |
722dacc742fa6c8689b0fc77c01ef3c20e230816 | 63f7520cecdf33f5e095d445cffe97257afed2bc | /gRPC/protolib/setup.py | 82d6de3af1b25c54c07d458f0e0a0766b928b1af | ["MIT"] | permissive | HarryMWinters/gRPYPI | e64cf2795d2504653ae31834ea95d75b3961af03 | 0ff0d61940a950d6d4b9395889ea4d94bd428184 | refs/heads/master | 2022-12-11T15:18:41.390730 | 2021-04-06T20:41:58 | 2021-04-06T20:41:58 | 224,575,210 | 0 | 0 | MIT | 2022-12-08T07:44:30 | 2019-11-28T05:17:41 | Python | UTF-8 | Python | false | false | 790 | py |
import os

import setuptools

REQUIRED = ["google"]

cwd = os.getcwd()

with open(os.path.join(cwd, "README.md"), "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="protolib",
    version="0.0.1",
    author="Turkey M. Gobbles",
    author_email="thanksgiving_sucks@freebird.com",
    description="This package holds python code for working with protos.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/HarryMWinters/gRPYPI",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    install_requires=REQUIRED,
)
| ["harrymcwinters@gmail.com"] | harrymcwinters@gmail.com |
011f5397d4715b048e12513c08e693c65f7515ec | 2b08c18c5ac84dc170eefb05d69e24800d34983e | /Datos/models.py | c0a351f24316205c077542e209c821d7d17c1885 | [] | no_license | wottan32/website | fda48f2f9c177f2aaf008c7b9bd94fbb06cb1de4 | db05b866badab8d046ea9eeb8c061d2e66312f98 | refs/heads/main | 2023-06-17T00:51:28.821850 | 2021-07-14T17:50:41 | 2021-07-14T17:50:41 | 385,640,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py |
from django.db import models
from django.utils import timezone


class Datos(models.Model):
    id = models.IntegerField(primary_key=True, max_length=255, blank=False)
    empresa_id = models.IntegerField(max_length=255, blank=False)
    nombre = models.CharField(max_length=255, blank=False, default='')
    razon_social = models.CharField(max_length=255, blank=False)
    rut = models.IntegerField(max_length=255, blank=False)
    plazo_pago = models.IntegerField(max_length=255, blank=False)
    oc = models.BooleanField(blank=False)
    giro = models.CharField(max_length=255, blank=False)
    contacto_factura = models.CharField(max_length=255, blank=False)
    direccion_legal = models.CharField(max_length=255, blank=False)
    comuna_legal = models.CharField(max_length=255, blank=False)
    contactos = models.CharField(max_length=255, blank=False)
    created = models.DateTimeField(default=timezone.now)
    modified = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return str(self.id)
|
[
"mariotorreslagos@gmail.com"
] |
mariotorreslagos@gmail.com
|
44509062537c690a06e88cd48b1ca7dd496cfaa1
|
96120b14b37c5b9472459918e26a7d45f38c800e
|
/deals/tests/test_urls.py
|
923fcd1baf5c16fd0a8a736734d6f29a6d4af5dd
|
[] |
no_license
|
AnnaKPolyakova/todo
|
9a96fe1ff57dedb2272b9bde38aabeb4cf487494
|
aca22815df14f3a7f3a763f7c32f4f77d05cf425
|
refs/heads/master
| 2023-01-14T09:30:39.647293
| 2020-11-18T14:16:53
| 2020-11-18T14:16:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,848
|
py
|
from django.contrib.auth import get_user_model
from django.test import TestCase, Client

from deals.models import Task


class TaskURLTests(TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Create a DB record to check that the task/test-slug/ address is reachable
        Task.objects.create(
            title='Тестовый заголовок',
            text='Тестовый текст',
            slug='test-slug'
        )

    def setUp(self):
        # Create an unauthenticated client
        self.guest_client = Client()
        # Create an authenticated client
        user = get_user_model()
        self.user = user.objects.create_user(username='StasBasov')
        self.authorized_client = Client()
        self.authorized_client.force_login(self.user)

    # Check the publicly available pages
    def test_home_url_exists_at_desired_location(self):
        """The / page is available to any user."""
        response = self.guest_client.get('/')
        self.assertEqual(response.status_code, 200)

    def test_task_added_url_exists_at_desired_location(self):
        """The /added/ page is available to any user."""
        response = self.guest_client.get('/added/')
        self.assertEqual(response.status_code, 200)

    # Check the pages available to an authenticated user
    def test_task_list_url_exists_at_desired_location(self):
        """The /task/ page is available to an authenticated user."""
        response = self.authorized_client.get('/task/')
        self.assertEqual(response.status_code, 200)

    def test_task_detail_url_exists_at_desired_location_authorized(self):
        """The /task/test-slug/ page is available to an authenticated user."""
        response = self.authorized_client.get('/task/test-slug/')
        self.assertEqual(response.status_code, 200)

    # Check the redirects for an anonymous user
    def test_task_list_url_redirect_anonymous_on_admin_login(self):
        """The /task/ page redirects an anonymous user
        to the login page.
        """
        response = self.guest_client.get('/task/', follow=True)
        self.assertRedirects(
            response, '/admin/login/?next=/task/')

    def test_task_detail_url_redirect_anonymous_on_admin_login(self):
        """The /task/test_slug/ page redirects an anonymous user
        to the login page.
        """
        response = self.client.get('/task/test-slug/', follow=True)
        self.assertRedirects(
            response, ('/admin/login/?next=/task/test-slug/'))

    # Check the template used for each address
    def test_urls_uses_correct_template(self):
        """Each URL uses the corresponding template."""
        templates_url_names = {
            'deals/home.html': '/',
            'deals/added.html': '/added/',
            'deals/task_list.html': '/task/',
            'deals/task_detail.html': '/task/test-slug/',
        }
        for template, url in templates_url_names.items():
            with self.subTest():
                response = self.authorized_client.get(url)
                self.assertTemplateUsed(response, template)
| ["proninc@yandex.ru"] | proninc@yandex.ru |
8b4cfe11d4422ccf18a27d988b129c8663adfe53 | acab80334c3a031e0418e3b98933d73bbebb9add | /PICK3_LOTTO.py | 616e8e87758e72df407bf027100de7aa19af7d38 | [] | no_license | aneeshpartha/PICK3_LOTTO_GAME | 1272141dd110cbbbe9e6749f24592f1aa9d50dbd | b43e5f687a9b724fb2b6a817b8c87fd6322c15f1 | refs/heads/master | 2020-03-18T00:04:53.685455 | 2018-05-19T16:01:06 | 2018-05-19T16:01:06 | 134,076,740 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,316 | py |
from datetime import datetime
import random
import sys

# Global variables
value1 = 0  # User input 1
value2 = 0  # User input 2
value3 = 0  # User input 3
randomnum = []  # Random number variable
fireball = "N/A"  # Fireball value
fboption = "N/A"  # Fireball option
attempts = 2  # Attempts to user
total = 0  # Total prize money

print("\t\t\tWelcome to pick 3 Lotto game")
print("\t\t\t****************************")


# Main function which invokes the entire game
def main():
    global attempts
    getuserdata()  # Call to user input function
    if attempts == 2:
        genrandomnum_fb()  # Call to generate random sequence
    result, fbresult = checker()  # Call to check randomly generated sequence with user input
    # Conditions based on match with random sequence and fireball number
    if result == True and fbresult == "wofb":
        print("Congratulations!!! You have won the pick 3 lotto without using Fireball... A $" + str(total) + " cash prize is on your way")
        summary("Win")
    elif result == True and fbresult == "wfbwin":
        print("Congratulations!!! You have won both pick 3 lotto and fireball...A $" + str(total) + "cash prize is on your way")
        summary("win")
    elif result == True and fbresult == "wfblose":
        print("Congratulations!!! Though you lost fireball You have won the pick 3 lotto... A $" + str(total) + " cash prize is on your way.")
        summary("win")
    elif result == False and fbresult == "wfbwin":
        print("Congratulations !!! Though you lost in pick 3 lotto You have won using fireball...A $" + str(total) + " cash prize is on your way")
        summary("Win")
    elif attempts != 0:
        print("Sorry you have lost the game. You have " + str(attempts) + " attempt(s) left")
        attempts -= 1
        replay = input("\nDo you want to play again ?")
        if replay == "y" or replay == "Y":
            main()
        elif replay == "n" or replay == "N":
            summary("lost")
            sys.exit()
    else:
        print("Sorry you have lost the game. Nice try !!! Better luck next time")
        summary("lost")


# Function to generate 3 random numbers
def genrandomnum_fb():
    global randomnum
    randomnum = sorted(random.sample(range(0, 10), 3))  # Usage of random.sample function


# Function to get user input and also to check user interest to play fireball
def getuserdata():
    global value1, value2, value3
    enteredvalue = 0
    # loop to iterate in case of any errors
    while enteredvalue != 1:
        try:
            # User inputs integer value
            value1 = int(input("\nPlease enter first number :"))
            value2 = int(input("Please enter second number :"))
            value3 = int(input("Please enter third number : "))
            # Condition to check if the value is greater than or equal to 0 and less than 10
            if value1 < 0 or value2 < 0 or value3 < 0 or value1 > 9 or value2 > 9 or value3 > 9:
                print("Entered value is not valid. Please re-enter")
                continue
            global fboption
            # Loop to check user interest to play fireball
            while fboption != "y" and fboption != "Y" and fboption != "n" and fboption != "N":
                fboption = input("\nDo you want to play pick three with fireball option?(Y/N) :")
                if fboption != "y" and fboption != "Y" and fboption != "n" and fboption != "N":
                    print("Entered option is invalid. Please re-enter")
            print("\nUser entered sequence:" + "[" + str(value1) + "," + str(value2) + "," + str(value3) + "]")
            enteredvalue = 1
        except ValueError:
            print("Sorry !!! The entered value is not valid.Please re-enter ")
            enteredvalue = 0


# Function to check if user entered input matches the randomly generated sequence
def checker():
    global fboption, total
    if randomnum == [value1, value2, value3]:
        total = 100  # Prize amount without fireball play
        if fboption == "y" or fboption == "Y":
            return (True, checkfireball())  # Calling fireball function
        else:
            return (True, "wofb")
    else:
        if fboption == "y" or fboption == "Y":
            return (False, checkfireball())
        else:
            return False, "wfbno"


# Function which randomly generates a fireball number and checks with the user sequence
def checkfireball():
    global fireball, total
    status = "wfblose"
    if attempts == 2:
        fireball = random.randint(1, 9)
    # print("FB:" + str(fireball))
    if [fireball, value2, value3] == randomnum:
        total += 50  # for each fireball match user gets $50
        status = "wfbwin"
    if [value1, fireball, value3] == randomnum:
        total += 50
        status = "wfbwin"
    if [value1, value2, fireball] == randomnum:
        total += 50
        status = "wfbwin"
    return status


# This function prints the summary of the game at the end
def summary(decision):
    print("\nBelow is the summary of the game")
    print("User entered input :" + "[" + str(value1) + "," + str(value2) + "," + str(value3) + "]")
    print("Gen:" + str(randomnum) + " " + "FB:" + str(fireball))
    print("Result:" + decision)
    print("Total prize = $" + str(total))


# main function is invoked
main()
| ["aneeshpartha5@gmail.com"] | aneeshpartha5@gmail.com |
225a59db64cf10c4d3f3a469aebb5ebcd3451d82 | da09119e2d5a6b46b3ea82060efd560fb003bb63 | /answer_search.py | 596491e68f32fb0db81203942cf6d081ef5e5e9c | [] | no_license | sc1054/KGQA | 1c1fa6c05253e6b97355fb1b4645755fbaf6130f | f40c64de9521bfb8747cb7f53d4532978faabd7e | refs/heads/master | 2023-06-05T17:08:28.689485 | 2021-07-06T06:14:38 | 2021-07-06T06:14:38 | 382,249,511 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,045 | py |
#!/usr/bin/env python3
# coding: utf-8
from py2neo import Graph
class AnswerSearcher:
def __init__(self):
self.g = Graph(
host="127.0.0.1",
http_port=7474,
user="neo4j",
password="admin")
self.num_limit = 20
    '''Execute the Cypher queries and return the corresponding results'''
def search_main(self, sqls):
final_answers = []
for sql_ in sqls:
question_type = sql_['question_type']
queries = sql_['sql']
answers = []
for query in queries:
ress = self.g.run(query).data()
answers += ress
final_answer = self.answer_prettify(question_type, answers)
if final_answer:
final_answers.append(final_answer)
return final_answers
    '''Call the corresponding reply template according to the question_type'''
def answer_prettify(self, question_type, answers):
final_answer = []
if not answers:
return ''
if question_type == 'disease_symptom':
desc = [i['n.name'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}的症状包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'symptom_disease':
desc = [i['m.name'] for i in answers]
subject = answers[0]['n.name']
final_answer = '症状{0}可能染上的疾病有:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_cause':
desc = [i['m.cause'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}可能的成因有:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_prevent':
desc = [i['m.prevent'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}的预防措施包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_lasttime':
desc = [i['m.cure_lasttime'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}治疗可能持续的周期为:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_cureway':
desc = [';'.join(i['m.cure_way']) for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}可以尝试如下治疗:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_cureprob':
desc = [i['m.cured_prob'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}治愈的概率为(仅供参考):{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_easyget':
desc = [i['m.easy_get'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}的易感人群包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_desc':
desc = [i['m.desc'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0},熟悉一下:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_acompany':
desc1 = [i['n.name'] for i in answers]
desc2 = [i['m.name'] for i in answers]
subject = answers[0]['m.name']
desc = [i for i in desc1 + desc2 if i != subject]
            final_answer = '{0}的并发症包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_not_food':
desc = [i['n.name'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}忌食的食物包括有:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_do_food':
do_desc = [i['n.name'] for i in answers if i['r.name'] == '宜吃']
recommand_desc = [i['n.name'] for i in answers if i['r.name'] == '推荐食谱']
subject = answers[0]['m.name']
final_answer = '{0}宜食的食物包括有:{1}\n推荐食谱包括有:{2}'.format(subject, ';'.join(list(set(do_desc))[:self.num_limit]),
';'.join(list(set(recommand_desc))[:self.num_limit]))
elif question_type == 'food_not_disease':
desc = [i['m.name'] for i in answers]
subject = answers[0]['n.name']
final_answer = '患有{0}的人最好不要吃{1}'.format(';'.join(list(set(desc))[:self.num_limit]), subject)
elif question_type == 'food_do_disease':
desc = [i['m.name'] for i in answers]
subject = answers[0]['n.name']
final_answer = '患有{0}的人建议多试试{1}'.format(';'.join(list(set(desc))[:self.num_limit]), subject)
elif question_type == 'disease_drug':
desc = [i['n.name'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}通常的使用的药品包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'drug_disease':
desc = [i['m.name'] for i in answers]
subject = answers[0]['n.name']
final_answer = '{0}主治的疾病有{1},可以试试'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'disease_check':
desc = [i['n.name'] for i in answers]
subject = answers[0]['m.name']
final_answer = '{0}通常可以通过以下方式检查出来:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
elif question_type == 'check_disease':
desc = [i['m.name'] for i in answers]
subject = answers[0]['n.name']
final_answer = '通常可以通过{0}检查出来的疾病有{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
return final_answer
if __name__ == '__main__':
searcher = AnswerSearcher()
ans = searcher.search_main([
{'question_type': 'disease_do_food',
'sql': ["MATCH (m:Disease)-[r:do_eat]->(n:Food) where m.name = '感冒' return m.name, r.name, n.name",
"MATCH (m:Disease)-[r:recommand_eat]->(n:Food) where m.name = '感冒' return m.name, r.name, n.name"
]
},
{'question_type': 'disease_drug',
'sql': ["MATCH (m:Disease)-[r:common_drug]->(n:Drug) where m.name = '感冒' return m.name, r.name, n.name",
"MATCH (m:Disease)-[r:recommand_drug]->(n:Drug) where m.name = '感冒' return m.name, r.name, n.name"
]
}
])
print(ans)
|
[
"sunchao1054@163.com"
] |
sunchao1054@163.com
|
17b2a62cc4f1f857d00ed0ae85f43453c9c534de
|
baa2d5c22a2164e5c8f6a3608578ffd9ad05133e
|
/model.py
|
4efec9e66f03de034c62f1ffd746efa5f785eb6d
|
[] |
no_license
|
DruidKuma/Self-Driving-Car-ND-Project-3-Behavior-Cloning
|
844b0a7041beeb92df296ebafbdb8e7811167b70
|
99cf8dd95b09701d4da28d67965460da826484b1
|
refs/heads/master
| 2021-08-26T08:35:42.771051
| 2017-11-22T15:05:51
| 2017-11-22T15:05:51
| 111,692,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,255
|
py
|
import numpy as np
import keras
import matplotlib.pyplot as plt
import csv
import cv2
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import *
from keras.layers import *
#resized image dimension in training
img_rows = 16
img_cols = 32
#batch size and epoch
batch_size=128
nb_epoch=15
delta = 0.35
def preprocess(img):
return cv2.resize((cv2.cvtColor(img, cv2.COLOR_RGB2HSV))[:,:,1],(img_cols,img_rows))
def load():
images = []
angles = []
# load camera images
with open('data/driving_log.csv','rt') as f:
reader = csv.reader(f)
next(reader, None)
for line in reader:
            for j in range(3):  # center, left, right images
img = plt.imread('data/'+line[j].strip())
images.append(preprocess(img))
angle = float(line[3])
if j == 1: angle += delta #for left image, we add delta
elif j == 2: angle -= delta #for right image we subtract delta
angles.append(angle)
# convert to numpy arrays
X_train = np.array(images).astype('float32')
y_train = np.array(angles).astype('float32')
# add augmented data (reflect horizontally each image)
X_train = np.append(X_train,X_train[:,:,::-1],axis=0)
y_train = np.append(y_train,-y_train,axis=0)
# shuffle data
X_train, y_train = shuffle(X_train, y_train)
# convert into required shape
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
#split into train and validation sets
return train_test_split(X_train, y_train, random_state=0, test_size=0.25)
if __name__ == '__main__':
#load data
X_train, X_val, y_train, y_val = load()
    # Create and train the model
model = Sequential([
Lambda(lambda x: x/127.5 - 1.,input_shape=(img_rows,img_cols,1)),
Conv2D(2, 3, 3, border_mode='valid', input_shape=(img_rows,img_cols,1), activation='relu'),
MaxPooling2D((4,4),(4,4),'valid'),
Dropout(0.25),
Flatten(),
Dense(1)
])
model.compile(loss='mean_squared_error',optimizer='adam')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_val, y_val))
# Save the model and weights
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
model.save_weights("model.h5")
print("Model Saved.")
|
[
"druidkuma@gmail.com"
] |
druidkuma@gmail.com
|
d113113170e962dbff65c3230b93efbbfc374af5
|
d5daac6b925b5cbe93002cec47db0d87f52d2353
|
/gradient_descent_method.py
|
10bede50334cd8bfa66c1dd240acbcf779fd4994
|
[] |
no_license
|
mfouda/Convex-Optimization-Algorithms
|
5b1c2beb3992812ec3c0741f2aba48215a89a15c
|
f3a77fc4ff4e5bb600c25955b4e826795fe9a379
|
refs/heads/master
| 2021-01-19T10:27:04.372432
| 2017-04-02T07:04:31
| 2017-04-02T07:04:31
| 87,868,241
| 1
| 0
| null | 2017-04-10T23:53:56
| 2017-04-10T23:53:56
| null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
import numpy as np
import basic_algorithms as ba
def gradientDescentMethod(func, initialX, displayDetail= False, decimal= 3, accuracy= 1e-6):
counter = 0
    while True:
        if displayDetail:
            print("ITERATION TIMES: " + str(counter))
        negativeGrad = - ba.numerical_differentiate.Grad(func, initialX)
        if displayDetail:
            print("NEG-GRADIENT AT POINT " + str(initialX.round(decimal))
                  + " IS " + str(negativeGrad.round(decimal)))
        optAlpha = ba.linear_search.GoldenSectionMethod(func, initialX, negativeGrad, accuracy)
        newX = initialX + optAlpha * negativeGrad
        if displayDetail:
            print("THE NEXT POINT IS: " + str(newX.round(decimal)))
            ba.decoration.plotDashLine()
        if ba.check_condition.isConvergent(initialX, newX, accuracy=accuracy):
            if displayDetail:
                print("ITERATION BREAK! THE MINIMAL VALUE OBTAINED AT POINT: "
                      + str(initialX.round(decimal)))
                print("THE MINIMAL VALUE OF FUNCTION IS: " + str(round(func(initialX), decimal)))
            return initialX
        else:
            initialX = newX.copy()
            counter += 1
# ===============================================
def myFunc(x):
return 2*(x[0]+2.3)**2 + (x[1]-1.5)**2
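# For reference: the analytic minimum of myFunc is at (-2.3, 1.5) with value 0,
# so optX should converge to roughly that point.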
initialX = np.array([8., 5.], dtype= float)
optX = gradientDescentMethod(myFunc, initialX, displayDetail= True)
|
[
"bk20130048@my.swjtu.edu.cn"
] |
bk20130048@my.swjtu.edu.cn
|
f608b00977c20e9a0507628e7f62a20068ff2af6
|
cae13e31d802bd011af10f2b7c943e01ec8ba3a0
|
/curve.py
|
57250375b079cb6575871dacb8fcaf711ef7cd3f
|
[] |
no_license
|
VKAnirudh14/C-PRO-98
|
546e82dfb0dd5fe135854094108931a37e06a2ab
|
f4c51aec783b6697872c303e356c3bd3bc896d75
|
refs/heads/main
| 2023-04-05T03:10:26.362438
| 2021-04-05T03:40:28
| 2021-04-05T03:40:28
| 354,707,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
import plotly.figure_factory as ff
import pandas as pd
df = pd.read_csv('data.csv')
fig = ff.create_distplot([df['Avg Rating'].tolist()], ['Average Rating'])
fig.show()
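# create_distplot overlays a histogram, kernel-density curve and rug plot for the 'Avg Rating' column.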
|
[
"noreply@github.com"
] |
noreply@github.com
|
627b37547257bf028218c394028ba638d78fb0a6
|
bdd8fe60144b364dade0c383ba9ac7a400457c69
|
/freight/api/task_log.py
|
1f03bac3b2cbdff6275b4d8d6b4886d30a94c799
|
[
"Apache-2.0"
] |
permissive
|
thoas/freight
|
61eda7cb397696eb2c3a7504d03f2f4654ad7e8f
|
9934cfb3c868b5e4b813259ca83c748676d598a0
|
refs/heads/master
| 2021-01-18T17:24:25.758448
| 2015-09-03T20:45:35
| 2015-09-03T20:45:36
| 41,413,179
| 1
| 0
| null | 2015-08-26T08:13:07
| 2015-08-26T08:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,200
|
py
|
from __future__ import absolute_import
from flask_restful import reqparse
from freight.api.base import ApiView
from freight.config import db
from freight.models import LogChunk
from .task_details import TaskMixin
class TaskLogApiView(ApiView, TaskMixin):
get_parser = reqparse.RequestParser()
get_parser.add_argument('offset', location='args', type=int, default=0)
get_parser.add_argument('limit', location='args', type=int)
def get(self, **kwargs):
"""
Retrieve task log.
"""
task = self._get_task(**kwargs)
if task is None:
return self.error('Invalid task', name='invalid_resource', status_code=404)
args = self.get_parser.parse_args()
queryset = db.session.query(
LogChunk.text, LogChunk.offset, LogChunk.size
).filter(
LogChunk.task_id == task.id,
).order_by(LogChunk.offset.asc())
if args.offset == -1:
# starting from the end so we need to know total size
tail = db.session.query(LogChunk.offset + LogChunk.size).filter(
LogChunk.task_id == task.id,
).order_by(LogChunk.offset.desc()).limit(1).scalar()
if tail is None:
logchunks = []
else:
if args.limit:
queryset = queryset.filter(
(LogChunk.offset + LogChunk.size) >= max(tail - args.limit + 1, 0),
)
else:
if args.offset:
queryset = queryset.filter(
LogChunk.offset >= args.offset,
)
if args.limit:
queryset = queryset.filter(
LogChunk.offset < args.offset + args.limit,
)
logchunks = list(queryset)
if logchunks:
next_offset = logchunks[-1].offset + logchunks[-1].size
else:
next_offset = args.offset
links = self.build_cursor_link('next', next_offset)
context = {
'text': ''.join(l.text for l in logchunks),
'nextOffset': next_offset,
}
return self.respond(context, links=links)
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
dd1a1863ce60dfb7b86fcd62c682a6bc2ad6f9a1
|
189ed26b70c45291e5b3659945dcc36b0c764570
|
/SmartDepot-MotionDetect/Motion.py
|
e886c0f65d93bfbabb75aa5c9b8b64cff5b02f28
|
[] |
no_license
|
shivendra29/SmartDepot
|
0c0bc3d6a24d656e712b5efa0b643b1ba298a289
|
6b07f90a10b4981a94efa1b9f874df876f5c7ccb
|
refs/heads/master
| 2020-04-27T09:02:41.312370
| 2018-10-01T10:42:16
| 2018-10-01T10:42:16
| 174,198,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,492
|
py
|
#Import OpenCV for processing of images
#Import time for window time logging
#Import datetime for saving timestamps of motion detection events
import cv2, time
from datetime import datetime
from SendEmail import sendmail
#reference background frame against which to compare the presence of object/motion
first_frame=None
#Capture video feed from webcam (0), use video filename here for pre-recorded video
video = cv2.VideoCapture(0)
statusList=[-1, -1] #stores the presence/absence of object in the present frame. -1 for absent and 1 for present
times=[] #stores timestamps of the entry and exit of object
#the following loop continuously captures and displays the video feed until user prompts an exit by pressing Q
while True:
#the read function gives two outputs. The check is a boolean function that returns if the video is being read
check, frame = video.read()
status=-1 #initialise status variable. This stores the presence/absence of object in the current frame
grayImg=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) #Grayscale conversion of the frame
#Gaussian blur to smoothen the image and remove noise.
	#The tuple is the kernel size and the 0 is the std deviation of the blur function
grayImg=cv2.GaussianBlur(grayImg, (21,21),0)
if first_frame is None:
first_frame=grayImg #collect the reference frame as the first video feed frame
continue
#calculates the absolute difference between current frame and reference frame
deltaFrame=cv2.absdiff(first_frame,grayImg)
#convert image from grayscale to binary. This increases the demarcation between object and background by using a threshold function that
#converts everything above threshold to white
threshFrame=cv2.threshold(deltaFrame, 30, 255, cv2.THRESH_BINARY)[1]
#dilating the threshold removes the sharp edges at the object/background boundary and makes it smooth.
	#The more iterations, the smoother the image; too smooth and you lose valuable data
threshFrame=cv2.dilate(threshFrame, None, iterations=2)
#Contour Function
#The contour function helps identify the closed object areas within the background.
#After thresholding, the frame has closed shapes of the objects against the background
#The contour function identifies and creates a list (cnts) of all these contours in the frame
#The RETR_EXTERNAL ensures that you only get the outermost contour details and all child contours inside it are ignored
#The CHAIN_APPROX_SIMPLE is the approximation method used for locating the contours. The simple one is used here for our trivial purpose
#Simple approximation removes all the redundant points in the description of the contour line
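	#Note: the three-value unpacking below matches the OpenCV 3.x API; OpenCV 2.x and 4.x return just (contours, hierarchy)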
(_,cnts,_)=cv2.findContours(threshFrame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in cnts:
if cv2.contourArea(contour) < 10000:
#excluding too small contours. Set 10000 (100x100 pixels) for objects close to camera
continue
status=1
#obtain the corresponding bounding rectangle of our detected contour
(x, y, w, h) = cv2.boundingRect(contour)
#superimpose a rectangle on the identified contour in our original colour image
#(x,y) is the top left corner, (x+w, y+h) is the bottom right corner
#(0,255,0) is colour green and 3 is the thickness of the rectangle edges
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 3)
#do the above for all contours greater than our set size
#add the present status to our list
statusList.append(status)
cv2.imshow("Colour Frame", frame)
#Detecting the entry and exit of objects
#Every entry/exit is identified by a sign change of the last two elements in our list, hence product is -1
if (statusList[-1]*statusList[-2])==-1:
#success,img_data = video.read()
#if success:
# cv2.imwrite("intruder.jpg", img_data) # save frame as JPEG file
# cv2.waitKey()
if (statusList[-1]==1):
sendmail()
#unitTesting
#cv2.imshow("Capturing", grayImg)
#cv2.imshow("DeltaFrame", deltaFrame)
#cv2.imshow("Threshold Frame", threshFrame)
#print(status)
	#displays the continuous feed with the green frame for any foreign object in frame
#picks up the key press Q and exits when pressed
key=cv2.waitKey(1)
if key==ord('q'):
#if foreign object is in frame at the time of exiting, it stores the timestamp
if status==1:
times.append(datetime.now())
break
#print(statusList)
#print(times)
#Closes all windows
cv2.destroyAllWindows()
#Releases video file/webcam
video.release()
|
[
"33812863+shivendra29@users.noreply.github.com"
] |
33812863+shivendra29@users.noreply.github.com
|
a1cf5368c4eea778d041c5af86d0bf3a3f4abd62
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_sodden.py
|
9a58a5d33739918fea93b36c32416d0d46ba6316
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# class header
class _SODDEN():
def __init__(self,):
self.name = "SODDEN"
self.definitions = [u'(of something that can absorb water) extremely wet: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
54b746939497754c0edd8dc3570743f4ee22d180
|
9258ae775a26916853c4c42ec7569a1368dbd2af
|
/config/settings.py
|
b2688aa1626a40c71d917a15997884dfff0609d8
|
[] |
no_license
|
rornfl9731/airBnB_Project
|
0a7a6de9bb7afcf5b038acd757523287c9e70a20
|
d80f5c3d7869e4e6a1968d45ff6cb4b909327994
|
refs/heads/master
| 2020-08-24T03:20:33.779450
| 2019-11-28T19:41:21
| 2019-11-28T19:41:21
| 216,754,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,835
|
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5ewf%01yndnft76w+)z$)6yvlcvhd-!9hg3uvnou_!ecx@^_%s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
THIRD_PARTY_APPS = ["django_countries", "django_seed"]
PROJECT_APPS = [
"core.apps.CoreConfig",
"users.apps.UsersConfig",
"rooms.apps.RoomsConfig",
"reviews.apps.ReviewsConfig",
"reservations.apps.ReservationsConfig",
"lists.apps.ListsConfig",
"conversations.apps.ConversationsConfig",
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + PROJECT_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
MEDIA_URL = "/media/"
AUTH_USER_MODEL = "users.User"
# Email
EMAIL_HOST = "smtp.mailgun.org"
EMAIL_PORT = "587"
EMAIL_HOST_USER = os.environ.get("MAILGUN_USERNAME")
EMAIL_HOST_PASSWORD = os.environ.get("MAILGUN_PASSWORD")
EMAIL_FROM = "postmaster@sandboxd5af9e8be5c54a23962070abcf8ea99d.mailgun.org"
|
[
"inx0930@gmail.com"
] |
inx0930@gmail.com
|
561baf6fe72f9366e547ec35dbdac517d7c39548
|
dbbdb45032ca3e5ffe9b0681fcb3d44ec6f2a189
|
/singapore/nea_api/__init__.py
|
2f8da51ee1cfc2336e522d2470fea10aa816702f
|
[] |
no_license
|
codelah/singapore
|
c63666a4c24ee123e293e875453afb088c60024c
|
fea6bad783c07894338f92700c18d5b42de07ede
|
refs/heads/master
| 2021-01-19T00:59:49.106203
| 2017-08-12T03:29:00
| 2017-08-12T03:29:00
| 61,871,329
| 16
| 2
| null | 2017-08-12T03:29:01
| 2016-06-24T08:55:20
|
Python
|
UTF-8
|
Python
| false
| false
| 22
|
py
|
from nea_api import *
|
[
"zhchua@gmail.com"
] |
zhchua@gmail.com
|
282399609317833cf35a9418fdac25bece55fe85
|
5afc3043b9b43a0e72bc94a90ed832a9576bb580
|
/base/skill_59/py_06/py_44_copyreg.py
|
d527bd7b1839bbf108f061884156436b5976dfb3
|
[] |
no_license
|
JR1QQ4/python
|
629e7ddec7a261fb8a59b834160ceea80239a0f7
|
a162a5121fdeeffbfdad9912472f2a790bb1ff53
|
refs/heads/main
| 2023-08-25T00:40:25.975915
| 2021-11-07T14:10:20
| 2021-11-07T14:10:20
| 311,769,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Item 44: Make pickle reliable with copyreg
# The built-in pickle module is only suitable for serializing and deserializing
# objects between programs that trust each other.
# With more complicated usage, pickle on its own can break down.
# Combining the built-in copyreg module with pickle supplies default values for
# attributes missing from old data, enables class versioning, and gives
# serialized data a stable import path.
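
# A minimal runnable sketch of the pattern described above (an added
# illustration, assuming Python 3; GameState and the helper names are
# hypothetical). Registering a reduce function with copyreg gives pickled
# data a stable import path and lets attributes added in later versions
# fall back to constructor defaults.
import copyreg
import pickle


class GameState:
    def __init__(self, level=0, lives=4, points=0):
        # 'points' stands in for a field added in a newer version; old
        # pickles without it still load because of the default value.
        self.level = level
        self.lives = lives
        self.points = points


def pickle_game_state(game_state):
    # Reduce the object to (unpickle_callable, args) so pickle records the
    # helper's import path instead of the class's.
    kwargs = game_state.__dict__
    return unpickle_game_state, (kwargs,)


def unpickle_game_state(kwargs):
    return GameState(**kwargs)


copyreg.pickle(GameState, pickle_game_state)

state = GameState(level=2)
state_after = pickle.loads(pickle.dumps(state))
assert state_after.level == 2 and state_after.points == 0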
|
[
"chenjunrenyx@163.com"
] |
chenjunrenyx@163.com
|
88562a0aab5a88242fbd50593bb4ab3c914d7b46
|
86338779e91ed9b822fa5e0b213a4e3ad85ecadc
|
/configs/project_config.py
|
2f1e1e6887ac8b9f6169ba2daaead553e0ad6bbb
|
[] |
no_license
|
goddoe/weakly-supervised-detector
|
b423d1a4e534629961288c5b647f136d616e49f7
|
901c0a83e65dd438ae995412f2a59d299b9d4f24
|
refs/heads/master
| 2021-07-05T07:10:53.791119
| 2019-03-29T09:39:02
| 2019-03-29T09:39:02
| 120,253,624
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
import os
project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
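# project_path resolves to the repository root, two directory levels above this config file.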
|
[
"goddoe2@gmail.com"
] |
goddoe2@gmail.com
|
6c249c704fb9dcad286b896aac14b4023e741304
|
98dbb9cd9523809b4ee0e6b92334fa6a2a6af2a3
|
/bingads/v13/bulk/entities/audiences/bulk_campaign_negative_product_audience_association.py
|
80d8c4fc61c6babc61466c3ac50597c9c0a847f1
|
[
"MIT"
] |
permissive
|
BingAds/BingAds-Python-SDK
|
a2f9b0c099b574a4495d0052218f263af55cdb32
|
373a586402bf24af7137b7c49321dbc70c859fce
|
refs/heads/main
| 2023-07-27T15:31:41.354708
| 2023-07-10T03:21:03
| 2023-07-10T03:21:03
| 31,927,550
| 105
| 182
|
NOASSERTION
| 2023-09-04T06:51:20
| 2015-03-09T23:09:01
|
Python
|
UTF-8
|
Python
| false
| false
| 586
|
py
|
from bingads.v13.bulk.entities.audiences.bulk_campaign_negative_audience_association import *
class BulkCampaignNegativeProductAudienceAssociation(BulkCampaignNegativeAudienceAssociation):
""" Represents an Campaign Negative Product Audience Association that can be read or written in a bulk file.
For more information, see Campaign Negative Product Audience Association at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
|
[
"qitia@microsoft.com"
] |
qitia@microsoft.com
|
8a5591e66fcadfc9f649b0d75c3579d01549b7b8
|
2e5c0e502216b59a4e348437d4291767e29666ea
|
/Flask-Web/flasky/Lib/site-packages/dns/tokenizer.py
|
3e5d2ba92e8762532c2b3c5d6cbd0170298b26c7
|
[
"Apache-2.0",
"GPL-1.0-or-later"
] |
permissive
|
fengzse/Feng_Repository
|
8881b64213eef94ca8b01652e5bc48e92a28e1f5
|
db335441fa48440e72eefab6b5fd61103af20c5d
|
refs/heads/master
| 2023-07-24T04:47:30.910625
| 2023-02-16T10:34:26
| 2023-02-16T10:34:26
| 245,704,594
| 1
| 0
|
Apache-2.0
| 2023-07-15T00:54:20
| 2020-03-07T20:59:04
|
Python
|
UTF-8
|
Python
| false
| false
| 20,833
|
py
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Tokenize DNS master file format"""
import io
import sys
import dns.exception
import dns.name
import dns.ttl
_DELIMITERS = {' ', '\t', '\n', ';', '(', ')', '"'}
_QUOTING_DELIMITERS = {'"'}
EOF = 0
EOL = 1
WHITESPACE = 2
IDENTIFIER = 3
QUOTED_STRING = 4
COMMENT = 5
DELIMITER = 6
class UngetBufferFull(dns.exception.DNSException):
"""An attempt was made to unget a token when the unget buffer was full."""
class Token:
"""A DNS master file format token.
ttype: The token type
value: The token value
has_escape: Does the token value contain escapes?
"""
def __init__(self, ttype, value='', has_escape=False):
"""Initialize a token instance."""
self.ttype = ttype
self.value = value
self.has_escape = has_escape
def is_eof(self):
return self.ttype == EOF
def is_eol(self):
return self.ttype == EOL
def is_whitespace(self):
return self.ttype == WHITESPACE
def is_identifier(self):
return self.ttype == IDENTIFIER
def is_quoted_string(self):
return self.ttype == QUOTED_STRING
def is_comment(self):
return self.ttype == COMMENT
def is_delimiter(self): # pragma: no cover (we don't return delimiters yet)
return self.ttype == DELIMITER
def is_eol_or_eof(self):
return self.ttype == EOL or self.ttype == EOF
def __eq__(self, other):
if not isinstance(other, Token):
return False
return (self.ttype == other.ttype and
self.value == other.value)
def __ne__(self, other):
if not isinstance(other, Token):
return True
return (self.ttype != other.ttype or
self.value != other.value)
def __str__(self):
return '%d "%s"' % (self.ttype, self.value)
def unescape(self):
if not self.has_escape:
return self
unescaped = ''
l = len(self.value)
i = 0
while i < l:
c = self.value[i]
i += 1
if c == '\\':
if i >= l:
raise dns.exception.UnexpectedEnd
c = self.value[i]
i += 1
if c.isdigit():
if i >= l:
raise dns.exception.UnexpectedEnd
c2 = self.value[i]
i += 1
if i >= l:
raise dns.exception.UnexpectedEnd
c3 = self.value[i]
i += 1
if not (c2.isdigit() and c3.isdigit()):
raise dns.exception.SyntaxError
c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
unescaped += c
return Token(self.ttype, unescaped)
def unescape_to_bytes(self):
# We used to use unescape() for TXT-like records, but this
# caused problems as we'd process DNS escapes into Unicode code
# points instead of byte values, and then a to_text() of the
# processed data would not equal the original input. For
# example, \226 in the TXT record would have a to_text() of
# \195\162 because we applied UTF-8 encoding to Unicode code
# point 226.
#
# We now apply escapes while converting directly to bytes,
# avoiding this double encoding.
#
# This code also handles cases where the unicode input has
# non-ASCII code-points in it by converting it to UTF-8. TXT
# records aren't defined for Unicode, but this is the best we
# can do to preserve meaning. For example,
#
# foo\u200bbar
#
# (where \u200b is Unicode code point 0x200b) will be treated
# as if the input had been the UTF-8 encoding of that string,
# namely:
#
# foo\226\128\139bar
#
unescaped = b''
l = len(self.value)
i = 0
while i < l:
c = self.value[i]
i += 1
if c == '\\':
if i >= l:
raise dns.exception.UnexpectedEnd
c = self.value[i]
i += 1
if c.isdigit():
if i >= l:
raise dns.exception.UnexpectedEnd
c2 = self.value[i]
i += 1
if i >= l:
raise dns.exception.UnexpectedEnd
c3 = self.value[i]
i += 1
if not (c2.isdigit() and c3.isdigit()):
raise dns.exception.SyntaxError
unescaped += b'%c' % (int(c) * 100 + int(c2) * 10 + int(c3))
else:
# Note that as mentioned above, if c is a Unicode
# code point outside of the ASCII range, then this
# += is converting that code point to its UTF-8
# encoding and appending multiple bytes to
# unescaped.
unescaped += c.encode()
else:
unescaped += c.encode()
return Token(self.ttype, bytes(unescaped))
class Tokenizer:
"""A DNS master file format tokenizer.
A token object is basically a (type, value) tuple. The valid
types are EOF, EOL, WHITESPACE, IDENTIFIER, QUOTED_STRING,
COMMENT, and DELIMITER.
file: The file to tokenize
ungotten_char: The most recently ungotten character, or None.
ungotten_token: The most recently ungotten token, or None.
multiline: The current multiline level. This value is increased
by one every time a '(' delimiter is read, and decreased by one every time
a ')' delimiter is read.
quoting: This variable is true if the tokenizer is currently
reading a quoted string.
eof: This variable is true if the tokenizer has encountered EOF.
delimiters: The current delimiter dictionary.
line_number: The current line number
filename: A filename that will be returned by the where() method.
idna_codec: A dns.name.IDNACodec, specifies the IDNA
encoder/decoder. If None, the default IDNA 2003
encoder/decoder is used.
"""
def __init__(self, f=sys.stdin, filename=None, idna_codec=None):
"""Initialize a tokenizer instance.
f: The file to tokenize. The default is sys.stdin.
This parameter may also be a string, in which case the tokenizer
will take its input from the contents of the string.
filename: the name of the filename that the where() method
will return.
idna_codec: A dns.name.IDNACodec, specifies the IDNA
encoder/decoder. If None, the default IDNA 2003
encoder/decoder is used.
"""
if isinstance(f, str):
f = io.StringIO(f)
if filename is None:
filename = '<string>'
elif isinstance(f, bytes):
f = io.StringIO(f.decode())
if filename is None:
filename = '<string>'
else:
if filename is None:
if f is sys.stdin:
filename = '<stdin>'
else:
filename = '<file>'
self.file = f
self.ungotten_char = None
self.ungotten_token = None
self.multiline = 0
self.quoting = False
self.eof = False
self.delimiters = _DELIMITERS
self.line_number = 1
self.filename = filename
if idna_codec is None:
idna_codec = dns.name.IDNA_2003
self.idna_codec = idna_codec
def _get_char(self):
"""Read a character from input.
"""
if self.ungotten_char is None:
if self.eof:
c = ''
else:
c = self.file.read(1)
if c == '':
self.eof = True
elif c == '\n':
self.line_number += 1
else:
c = self.ungotten_char
self.ungotten_char = None
return c
def where(self):
"""Return the current location in the input.
Returns a (string, int) tuple. The first item is the filename of
the input, the second is the current line number.
"""
return (self.filename, self.line_number)
def _unget_char(self, c):
"""Unget a character.
The unget buffer for characters is only one character large; it is
an error to try to unget a character when the unget buffer is not
empty.
c: the character to unget
raises UngetBufferFull: there is already an ungotten char
"""
if self.ungotten_char is not None:
# this should never happen!
raise UngetBufferFull # pragma: no cover
self.ungotten_char = c
def skip_whitespace(self):
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
Returns the number of characters skipped.
"""
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1
def get(self, want_leading=False, want_comment=False):
"""Get the next token.
want_leading: If True, return a WHITESPACE token if the
first character read is whitespace. The default is False.
want_comment: If True, return a COMMENT token if the
first token read is a comment. The default is False.
Raises dns.exception.UnexpectedEnd: input ended prematurely
Raises dns.exception.SyntaxError: input was badly formed
Returns a Token.
"""
if self.ungotten_token is not None:
token = self.ungotten_token
self.ungotten_token = None
if token.is_whitespace():
if want_leading:
return token
elif token.is_comment():
if want_comment:
return token
else:
return token
skipped = self.skip_whitespace()
if want_leading and skipped > 0:
return Token(WHITESPACE, ' ')
token = ''
ttype = IDENTIFIER
has_escape = False
while True:
c = self._get_char()
if c == '' or c in self.delimiters:
if c == '' and self.quoting:
raise dns.exception.UnexpectedEnd
if token == '' and ttype != QUOTED_STRING:
if c == '(':
self.multiline += 1
self.skip_whitespace()
continue
elif c == ')':
if self.multiline <= 0:
raise dns.exception.SyntaxError
self.multiline -= 1
self.skip_whitespace()
continue
elif c == '"':
if not self.quoting:
self.quoting = True
self.delimiters = _QUOTING_DELIMITERS
ttype = QUOTED_STRING
continue
else:
self.quoting = False
self.delimiters = _DELIMITERS
self.skip_whitespace()
continue
elif c == '\n':
return Token(EOL, '\n')
elif c == ';':
while 1:
c = self._get_char()
if c == '\n' or c == '':
break
token += c
if want_comment:
self._unget_char(c)
return Token(COMMENT, token)
elif c == '':
if self.multiline:
raise dns.exception.SyntaxError(
'unbalanced parentheses')
return Token(EOF)
elif self.multiline:
self.skip_whitespace()
token = ''
continue
else:
return Token(EOL, '\n')
else:
# This code exists in case we ever want a
# delimiter to be returned. It never produces
# a token currently.
token = c
ttype = DELIMITER
else:
self._unget_char(c)
break
elif self.quoting and c == '\n':
raise dns.exception.SyntaxError('newline in quoted string')
elif c == '\\':
#
# It's an escape. Put it and the next character into
# the token; it will be checked later for goodness.
#
token += c
has_escape = True
c = self._get_char()
if c == '' or c == '\n':
raise dns.exception.UnexpectedEnd
token += c
if token == '' and ttype != QUOTED_STRING:
if self.multiline:
raise dns.exception.SyntaxError('unbalanced parentheses')
ttype = EOF
return Token(ttype, token, has_escape)
def unget(self, token):
"""Unget a token.
The unget buffer for tokens is only one token large; it is
an error to try to unget a token when the unget buffer is not
empty.
token: the token to unget
Raises UngetBufferFull: there is already an ungotten token
"""
if self.ungotten_token is not None:
raise UngetBufferFull
self.ungotten_token = token
def next(self):
"""Return the next item in an iteration.
Returns a Token.
"""
token = self.get()
if token.is_eof():
raise StopIteration
return token
__next__ = next
def __iter__(self):
return self
# Helpers
def get_int(self, base=10):
"""Read the next token and interpret it as an unsigned integer.
Raises dns.exception.SyntaxError if not an unsigned integer.
Returns an int.
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
return int(token.value, base)
def get_uint8(self):
"""Read the next token and interpret it as an 8-bit unsigned
integer.
Raises dns.exception.SyntaxError if not an 8-bit unsigned integer.
Returns an int.
"""
value = self.get_int()
if value < 0 or value > 255:
raise dns.exception.SyntaxError(
'%d is not an unsigned 8-bit integer' % value)
return value
def get_uint16(self, base=10):
"""Read the next token and interpret it as a 16-bit unsigned
integer.
Raises dns.exception.SyntaxError if not a 16-bit unsigned integer.
Returns an int.
"""
value = self.get_int(base=base)
if value < 0 or value > 65535:
if base == 8:
raise dns.exception.SyntaxError(
'%o is not an octal unsigned 16-bit integer' % value)
else:
raise dns.exception.SyntaxError(
'%d is not an unsigned 16-bit integer' % value)
return value
def get_uint32(self, base=10):
"""Read the next token and interpret it as a 32-bit unsigned
integer.
Raises dns.exception.SyntaxError if not a 32-bit unsigned integer.
Returns an int.
"""
value = self.get_int(base=base)
if value < 0 or value > 4294967295:
raise dns.exception.SyntaxError(
'%d is not an unsigned 32-bit integer' % value)
return value
def get_string(self, max_length=None):
"""Read the next token and interpret it as a string.
Raises dns.exception.SyntaxError if not a string.
Raises dns.exception.SyntaxError if token value length
exceeds max_length (if specified).
Returns a string.
"""
token = self.get().unescape()
if not (token.is_identifier() or token.is_quoted_string()):
raise dns.exception.SyntaxError('expecting a string')
if max_length and len(token.value) > max_length:
raise dns.exception.SyntaxError("string too long")
return token.value
def get_identifier(self):
"""Read the next token, which should be an identifier.
Raises dns.exception.SyntaxError if not an identifier.
Returns a string.
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return token.value
def concatenate_remaining_identifiers(self):
"""Read the remaining tokens on the line, which should be identifiers.
Raises dns.exception.SyntaxError if a token is seen that is not an
identifier.
Returns a string containing a concatenation of the remaining
identifiers.
"""
s = ""
while True:
token = self.get().unescape()
if token.is_eol_or_eof():
break
if not token.is_identifier():
raise dns.exception.SyntaxError
s += token.value
return s
def as_name(self, token, origin=None, relativize=False, relativize_to=None):
"""Try to interpret the token as a DNS name.
Raises dns.exception.SyntaxError if not a name.
Returns a dns.name.Name.
"""
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
name = dns.name.from_text(token.value, origin, self.idna_codec)
return name.choose_relativity(relativize_to or origin, relativize)
def get_name(self, origin=None, relativize=False, relativize_to=None):
"""Read the next token and interpret it as a DNS name.
Raises dns.exception.SyntaxError if not a name.
Returns a dns.name.Name.
"""
token = self.get()
return self.as_name(token, origin, relativize, relativize_to)
def get_eol(self):
"""Read the next token and raise an exception if it isn't EOL or
EOF.
Returns a string.
"""
token = self.get()
if not token.is_eol_or_eof():
raise dns.exception.SyntaxError(
'expected EOL or EOF, got %d "%s"' % (token.ttype,
token.value))
return token.value
def get_ttl(self):
"""Read the next token and interpret it as a DNS TTL.
Raises dns.exception.SyntaxError or dns.ttl.BadTTL if not an
identifier or badly formed.
Returns an int.
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return dns.ttl.from_text(token.value)
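
    # A minimal usage sketch (illustration only, not part of dnspython):
    #
    #     tok = Tokenizer('example.com. 3600 IN A 10.0.0.1\n')
    #     tok.get_identifier()   # 'example.com.'
    #     tok.get_ttl()          # 3600
    #     tok.get_identifier()   # 'IN'
    #     tok.get_identifier()   # 'A'
    #     tok.get_identifier()   # '10.0.0.1'
    #     tok.get_eol()          # '\n'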
|
[
"fzhuse@gmail.com"
] |
fzhuse@gmail.com
|
222f0e68d1e3968f67a837653d786d78e9316e0c
|
56b174addb87128ef54c85d2701b222b21877bbb
|
/settings_tornado.py
|
cfe56e91c425ecd2e6699d4db0e35992861de686
|
[
"BSD-2-Clause-Views"
] |
permissive
|
kwarodom/bemoss_web_ui-1
|
f8abbe7defc099bc40ff3c9c2b10c143a22ddbe5
|
6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92
|
refs/heads/master
| 2020-12-11T01:37:47.205927
| 2016-03-16T21:35:38
| 2016-03-16T21:35:38
| 54,082,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,265
|
py
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2016, Virginia Tech
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the authors and should not be
interpreted as representing official policies, either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the United States Government. Neither the
United States Government nor the United States Department of Energy, nor Virginia Tech, nor any of their employees,
nor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty,
express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or represents that its use would not infringe
privately owned rights.
Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or
otherwise does not necessarily constitute or imply its endorsement, recommendation, favoring by the United States
Government or any agency thereof, or Virginia Tech - Advanced Research Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof.
VIRGINIA TECH – ADVANCED RESEARCH INSTITUTE
under Contract DE-EE0006352
#__author__ = "BEMOSS Team"
#__credits__ = ""
#__version__ = "2.0"
#__maintainer__ = "BEMOSS Team"
#__email__ = "aribemoss@gmail.com"
#__website__ = "www.bemoss.org"
#__created__ = "2014-09-12 12:04:50"
#__lastUpdated__ = "2016-03-14 11:23:33"
'''
from django.core.urlresolvers import reverse_lazy
# Django settings for bemoss_web_ui project.
import os
import sys
DEBUG = True
#DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('kruthika', 'kruthika@vt.edu'),
)
PROJECT_DIR = os.path.dirname(__file__)
sys.path.insert(1, PROJECT_DIR + '/lib/clock')
MANAGERS = ADMINS
LOGIN_REDIRECT_URL = '/login'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'bemossdb', # Or path to database file if using sqlite3.
'USER': 'admin', # Not used with sqlite3.
'PASSWORD': 'admin', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
},
'smap': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
# Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'archiver', # Or path to database file if using sqlite3.
'USER': 'admin', # Not used with sqlite3.
'PASSWORD': 'admin', # Not used with sqlite3.
'HOST': '127.0.0.1',
'PORT': '',
},
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['bemoss.com','localhost','38.68.232.107','127.0.0.1']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = PROJECT_DIR + '/resources/'
#MEDIA_ROOT = PROJECT_DIR + '/logs/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/dummy/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'TEMP_KEY'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
#'corsheaders.middleware.CorsMiddleware',
'_utils.dos_secure.middleware.BanishMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'_utils.lockout.middleware.LockoutMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'Your Email'
EMAIL_HOST_PASSWORD = 'Your Password'
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = 'Your Email'
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
'django.contrib.messages.context_processors.messages',
)
APPEND_SLASH = True
BANISH_ENABLED = True
BANISH_EMPTY_UA = False
BANISH_ABUSE_THRESHOLD = 50
BANISH_MESSAGE = "Excessive attempts to reach BEMOSS Server. IP Banned."
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'apps.accounts',
'zmq',
'apps.dashboard',
'apps.thermostat',
'apps.smartplug',
'apps.admin',
'apps.lighting',
'clock',
'volttron',
'apps.error',
'apps.alerts',
'apps.schedule',
'apps.VAV',
'apps.RTU',
'tablib',
'_utils',
'_utils.dos_secure',
'apps.discovery'
#'_utils.lockout'
#'apps.registration'
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
BOOTSTRAP3 = {
'jquery_url': '//code.jquery.com/jquery.min.js',
'base_url': '//netdna.bootstrapcdn.com/bootstrap/3.0.3/',
'css_url': None,
'theme_url': None,
'javascript_url': None,
'horizontal_label_class': 'col-md-2',
'horizontal_field_class': 'col-md-4',
}
AUTH_PROFILE_MODULE = 'accounts.UserProfile'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
'''LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(PROJECT_DIR, 'logs/bemoss_ui_log.log'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'request_handler': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(PROJECT_DIR, 'logs/django_request.log'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': False
},
}
}'''
|
[
"aribemoss@gmail.com"
] |
aribemoss@gmail.com
|
ccda7e8a0c61a04ef2333397f33e10f530c1f1e3
|
24c084fb46ef0192b742d81d7ffcdeaa89e62909
|
/testes/GLC/teste_geradores.py
|
0fdce56e3aa7c7002dd15a2687aef57d16b83a56
|
[] |
no_license
|
MarcosECarvalho/Teoria-da-Computacao
|
88e4a59b4de1efd5144599ed0f706ae4f7d82db4
|
df74ea3c7080b2183ace5928a0865e250b8d1767
|
refs/heads/main
| 2023-08-26T21:31:55.293326
| 2021-11-08T14:51:03
| 2021-11-08T14:51:03
| 425,864,238
| 0
| 0
| null | 2021-11-08T14:13:09
| 2021-11-08T14:13:08
| null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
import tc.glc as glc
# STUDENT: ADEOYE SUNDAY LADELE
# TEST OF THE geradores() FUNCTION
# IMPLEMENTATION 66
V = {'E', 'I'}
T = {'+', '*', '-', '(', ')', 'a', 'b', '0', '1'}
P = {('E', ('I')),
('E', ('E', '+', 'E')),
('E', ('E', '*', 'E')),
('E', ('E', '-', 'E')),
('E', ('(', 'E', ')')),
('I', ('a')),
('I', ('b')),
('I', ('I', 'a')),
('I', ('I', 'b')),
     ('I', ('0')),
     ('I', ('1'))
}
G = (V, T, P, 'E')
A = {'S','T','C'}
B = {'a','b'}
C = {('S',('aS')),
('S',()),
('S',('a','T')),
('S',('a','C')),
('T',('b','T')),
('T',()),
}
M = (A,B,C,'S')
def test_gerador():
assert glc.geradores(G) == {'E','I','+', '*', '-', '(', ')', 'a', 'b', '0', '1'}
assert glc.geradores(M) == {'S','T','a','b'}
|
[
"glasscar46@gmail.com"
] |
glasscar46@gmail.com
|
7115c90cccdb2651de2a3381d355671fc1837512
|
f3b9d2f2524c95f474eaa977fbe2affad0d09fd3
|
/综合内容/github_pull的笔记/Django框架笔记/组件合集/django-admin-master/admin_web/king_admin/form.py
|
203e61cb1826f3eb0640bbf79d1a8bdefc6292fc
|
[] |
no_license
|
weiinng/down
|
4e42bc84477d9caa94923be3201023faa646c278
|
5fb88636ac6832ea5e0f101a3483a866a10c3f9e
|
refs/heads/master
| 2021-07-20T01:38:50.959998
| 2020-09-11T09:15:34
| 2020-09-11T09:15:34
| 209,210,195
| 2
| 0
| null | 2019-09-18T04:03:03
| 2019-09-18T03:35:26
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
from django.forms import ModelForm
from django.forms import ValidationError
from django.utils.translation import ugettext as _
# Dynamically generate a ModelForm class
def dynamic_class(request, admin_class):
def default_clean(self):
        # A default clean method attached to every generated form
print('----- run clean',self.cleaned_data)
error_list = []
print('idididid',self.instance.id)
if hasattr(admin_class, 'one'):
for i in admin_class.one:
self.add_error(i, "唯一字段")
if self.instance.id:
for field in admin_class.readonly_fields:
field_val = getattr(self.instance,field)
print(dir(field_val))
if hasattr(field_val,'select_related'):
m2m_obj = getattr(field_val,'select_related')().select_related()
m2m_vals = [i[0] for i in m2m_obj.values_list('id')]
if set(m2m_vals) != set([ i.id for i in self.cleaned_data.get(field)]):
print('-----set',set(m2m_vals),set([ i.id for i in self.cleaned_data.get(field)]))
self.add_error(field, "不能被修改")
continue
field_val_form_frontend = self.cleaned_data.get(field)
print('----->',field_val,field_val_form_frontend)
if field_val != field_val_form_frontend:
# error_list.append(ValidationError(
# _("Field %(field)s readonly,data should be %(value)s" ),
# code='invalid',
# params={'field':field,'value':field_val},
# ))
self.add_error(field , "不能被修改")
if admin_class.readonly_table:
raise ValidationError(
_("这张表只读无法修改" ),
code='invalid',
)
self.ValidationError = ValidationError
admin_class.default_form_validation(self)
# if response:
# for error_data in response:
# error_list.append(error_data)
# if error_list:
# raise ValidationError(error_list)
        # run the user's own clean methods
def __new__(cls,*args, **kwargs):
for field_name,obj in cls.base_fields.items():
            # Used to add styling to the generated form widgets
obj.widget.attrs['class'] = 'form-control'
if not hasattr(admin_class,'_status'):
if field_name in admin_class.readonly_fields:
obj.widget.attrs['disabled'] = 'disabled'
if hasattr(admin_class,"clean_%s" % field_name):
field_clean_func = getattr(admin_class,"clean_%s" % field_name)
setattr(cls, "clean_%s" % field_name,field_clean_func)
return ModelForm.__new__(cls)
class Meta:
model = admin_class.module
fields = '__all__'
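    # Build the ModelForm subclass at runtime, then graft on the custom __new__ and the default clean hook.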
parameter = {'Meta':Meta}
d_class = type('Dynamic_class',(ModelForm,),parameter)
setattr(d_class,'__new__',__new__)
setattr(d_class, 'clean', default_clean)
return d_class
|
[
"51934867+zhangwiening@users.noreply.github.com"
] |
51934867+zhangwiening@users.noreply.github.com
|
417b6ec0940ae33da0face74e31339022328e995
|
c54acc4ca303442fbad7e69b3f41233df8383c3e
|
/db_importer/preprocess_service.py
|
e633ddcd97f5ba76e7780e7ed633e69c874a00e0
|
[] |
no_license
|
konskoehler/ma
|
e6f702d22437ccd0d01479c7cef25e9b6ecf0eac
|
780b4e327a5b22dc798aa16d3168c36c40998309
|
refs/heads/main
| 2023-06-18T06:10:40.965685
| 2021-07-09T09:36:15
| 2021-07-09T09:36:15
| 382,626,921
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
from pyproj import Proj
import utils
from geopy.distance import great_circle
def preprocess_basics(df):
df_shift = df.shift(1)
df['l_lon'] = df_shift['lon']
df['l_lat'] = df_shift['lat']
df = df[~df['l_lon'].isnull()]
df['duration'] = (df['timestamp'] - df_shift['timestamp']).dt.total_seconds()
df['dist'] = df.apply(lambda x: great_circle([x['l_lat'], x['l_lon']], [x['lat'], x['lon']]).meters, axis=1)
df['velo'] = df['dist'] / df['duration']
df_shift = df.shift(1)
df['a'] = (df['velo'] - df_shift['velo']) / df['duration']
proj = Proj('epsg:5243')
proj_coords = df.apply(lambda x: proj(x['lon'], x['lat']), axis=1)
df.loc[:, ['x', 'y']] = list(map(list, proj_coords))
df['spike'] = utils.find_velocity_spikes(df)
df['section'] = df['spike'].cumsum()
df['section'] = df.apply(lambda x: int(x['section']) if x['spike'] is False else -1, axis=1)
return df
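# Minimal usage sketch (assumes a GPS trace with 'lat', 'lon' and
# 'timestamp' columns; utils.find_velocity_spikes is this repo's helper):
#
#   import pandas as pd
#   df = pd.DataFrame({
#       'lat': [52.5200, 52.5201, 52.5203],
#       'lon': [13.4050, 13.4051, 13.4053],
#       'timestamp': pd.to_datetime(['2021-01-01 10:00:00',
#                                    '2021-01-01 10:00:05',
#                                    '2021-01-01 10:00:10']),
#   })
#   df = preprocess_basics(df)  # adds dist, velo, a, x, y, section columns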
|
[
"kons.koehler@gmail.com"
] |
kons.koehler@gmail.com
|
da357a59f717ba6b355112edd5e138ae805e755d
|
f93651fdbfd70aa483893ecd371218a4a71072f5
|
/commands/greet.py
|
4c1ed0f6300b67d965a2337a87dc4b2f311e25a0
|
[
"BSD-3-Clause"
] |
permissive
|
Pinacolada64/NOW
|
000a0394d2124eac936c7c97261ef99454d935dc
|
c8b1d6a47505667c7a83d3e162c53ff379c6d039
|
refs/heads/master
| 2023-08-14T20:43:06.551444
| 2023-07-18T04:01:02
| 2023-07-18T04:01:02
| 53,027,731
| 14
| 8
| null | 2023-07-18T04:01:03
| 2016-03-03T06:43:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,950
|
py
|
# -*- coding: UTF-8 -*-
from evennia import CmdSet
from evennia.utils.evmenu import EvMenu
from commands.command import MuxCommand
class ConvoCmdSet(CmdSet):
key = 'talk'
def at_cmdset_creation(self):
"""Add command to the set - this set will be attached to the vehicle object (item or room)."""
self.add(NPCConvo())
class NPCConvo(MuxCommand):
"""
Greet an NPC
Usage:
greet
list [description, response], [label, description, response], [label, description, response]]
Skyrim is a good model for gaming conversation high-end evolution: Perhaps, have the NPC wait?
Meanwhile other things can be done. If too many poses occur, then quit convo.
If three says occur, quit convo. If too much time passes, also quit the conversation.
"""
key = 'greet'
locks = 'cmd:all()'
def func(self):
EvMenu(self.caller, 'commands.greet', startnode='menu_start_node', cmd_on_exit=None, persistent=False)
CONV = [['Say Hello', 'Well, hello there! How are you this fine day?'],
['Ask about potions', 'You can buy potions of various effects in the potion shop.'],
['Ask about picking fruits', 'You can earn up to 3 silver pieces a day.'],
['Talk about weather', "Yes, it's quite foggy."]]
def menu_start_node(caller):
text = "NPC greets you."
options = ()
for each in CONV:
options += ({'desc': each[0]},)
options += ({"key": "_default", "goto": "conversation"},)
return text, options
def conversation(caller, raw_string):
inp = raw_string.strip().lower()
topics = {}
for i, each in enumerate(CONV):
topics[str(i + 1)] = CONV[i][1]
if inp in topics.keys():
text = topics[inp]
        options = ({'key': "_default", 'goto': 'conversation'},)
else:
        text = "The NPC nods as you end the conversation."
options = None
return text, options
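# Usage sketch (Evennia-specific; ShopKeeper is an illustrative typeclass,
# not part of this repo):
#
#   from evennia import DefaultCharacter
#
#   class ShopKeeper(DefaultCharacter):
#       def at_object_creation(self):
#           self.cmdset.add(ConvoCmdSet, permanent=True)
#
# A player in the same room can then type `greet` to open the menu.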
|
[
"orkyporky@gmail.com"
] |
orkyporky@gmail.com
|
5a77e38b0dc9b3603f365cc6ddc687a2fe351dcb
|
deb83f04935f6f8084d6731b4b7284739abb9e1d
|
/16.py
|
07447812a9d94f2a12a1f7268d4d8502e49b1338
|
[] |
no_license
|
nilofar20/pythonnn
|
efdbe58a7d38f60117aaf77081feebc596166bb0
|
904c08bf796396759a7ec379201f3eca5d0f1d36
|
refs/heads/master
| 2020-06-11T06:41:50.720523
| 2019-07-27T08:12:22
| 2019-07-27T08:12:22
| 193,879,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
z,y=map(int,input().split())
for n in range(z,y):
if n>1:
for i in range(2,n):
if(n%i)==0:
break
else:
print(n,end=" ")
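# Example run (sketch): entering "10 20" prints the primes in [10, 20):
# 11 13 17 19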
|
[
"noreply@github.com"
] |
noreply@github.com
|
0f63546dde37b4f558b0a06b0b9db13717c4d47a
|
871ad716e6e9ceaa783e5ba914fbe678d0e6819a
|
/bubbly/util.py
|
ae9aac625d05659c200c9109fbd41d1daab4dddb
|
[
"MIT"
] |
permissive
|
linan7788626/brut
|
d653b8e3110fd0025e8c5279d3a36c8acbbad3d0
|
f4223b84448d1db1b0e98e043dc6670adf05ee5d
|
refs/heads/master
| 2020-12-02T15:08:57.724313
| 2014-06-25T12:34:55
| 2014-06-25T12:34:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,461
|
py
|
import os
from cPickle import load, dump
import logging
from skimage.transform import resize
from sklearn.metrics import recall_score, auc_score
import numpy as np
def lon_offset(x, y):
"""Return angular separation between two offsets which possibly
straddle l=0
>>> lon_offset(0, 1)
1
>>> lon_offset(1, 0)
1
>>> lon_offset(0, 355)
5
>>> lon_offset(355, 0)
5
>>> lon_offset(181, 0)
179
"""
return min(abs(x - y), abs(x + 360 - y), abs(x - (y + 360)))
def up_to_date(inputs, output):
"""Test whether an output file is more recent than
a list of input files
Parameters
----------
inputs: List of strings (paths to input files)
output: string (path to output file)
Returns
-------
Boolean (True if output more recent than all inputs)
"""
if not os.path.exists(output):
return False
itime = max(os.path.getmtime(input) for input in inputs)
otime = os.path.getmtime(output)
return otime > itime
def scale(x, mask=None, limits=None):
"""Scale an array as is done in MWP paper
    Sqrt transform of data clipped at 5 and 99.8%
"""
limits = limits or [5, 99.8]
if mask is None:
lo, hi = np.percentile(x, limits)
else:
lo, hi = np.percentile(x[mask], limits)
x = (np.clip(x, lo, hi) - lo) / (hi - lo)
return (np.sqrt(x) * 255).astype(np.uint8)
def resample(arr, shape):
"""Resample a 2D array, to change its shape"""
# skimage's resize needs scaled data
lo, hi = np.nanmin(arr), np.nanmax(arr)
arr = (arr - lo) / (hi - lo)
result = resize(arr, shape, mode='nearest')
return result * (hi - lo) + lo
def save_learner(clf, filename):
"""Save a scikit-learn model to a file"""
with open(filename, 'w') as outfile:
dump(clf, outfile)
def load_learner(filename):
""" Load a scikit-learn model from a file"""
with open(filename) as infile:
result = load(infile)
return result
def false_pos(Y, Yp):
return 1.0 * ((Y == 0) & (Yp == 1)).sum() / (Y == 0).sum()
def recall(Y, Yp):
return recall_score(Y, Yp)
def summary(clf, x, y):
df = clf.decision_function(x).ravel()
yp = df > 0
print 'False Positive: %0.3f' % false_pos(y, yp)
print 'Recall: %0.3f' % recall(y, yp)
print 'AUC: %0.3f' % auc_score(y, yp)
print 'Accuracy: %0.3f' % (yp == y).mean()
def roc_curve(y, yp, **kwargs):
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve as skroc
fp, tp, th = skroc(y, yp)
plt.plot(fp, tp, **kwargs)
plt.xlabel('False Positive')
plt.ylabel('True Positive')
ax = plt.gca()
ax.grid(which='major', axis='x',
linewidth=0.75, linestyle='-', color='0.75')
ax.grid(which='minor', axis='x',
linewidth=0.25, linestyle='-', color='0.75')
ax.grid(which='major', axis='y',
linewidth=0.75, linestyle='-', color='0.75')
ax.grid(which='minor', axis='y',
linewidth=0.25, linestyle='-', color='0.75')
return fp, tp
def rfp_curve(yp, Y, **kwargs):
""" Plot the false positive rate as a function of recall """
import matplotlib.pyplot as plt
npos = Y.sum()
nneg = Y.size - npos
ind = np.argsort(yp)[::-1]
y = Y[ind]
yp = yp[ind]
recall = (1. * np.cumsum(y == 1)) / npos
false_pos = (1. * np.cumsum(y == 0)) / nneg
r = 1.0 * ((yp > 0) & (y == 1)).sum() / npos
fp = 1.0 * ((yp > 0) & (y == 0)).sum() / nneg
l, = plt.plot(recall, false_pos, **kwargs)
plt.plot([r], [fp], 'o', c=l.get_color())
plt.xlabel('Recall')
plt.ylabel('False Positive')
plt.title("R=%0.3f, FP=%0.4f" % (r, fp))
ax = plt.gca()
ax.grid(which='major', axis='x',
linewidth=0.75, linestyle='-', color='0.75')
ax.grid(which='minor', axis='x',
linewidth=0.25, linestyle='-', color='0.75')
ax.grid(which='major', axis='y',
linewidth=0.75, linestyle='-', color='0.75')
ax.grid(which='minor', axis='y',
linewidth=0.25, linestyle='-', color='0.75')
return recall, false_pos
def _stamp_distances(stamps):
#compute distance matrix for a list of stamps
n = len(stamps)
result = np.zeros((n, n)) * np.nan
for i in range(n):
si = stamps[i]
xi, yi, di = si[1:4]
for j in range(i + 1, n, 1):
sj = stamps[j]
xj, yj, dj = sj[1:4]
dx = np.hypot(xi - xj, yi - yj)
if dx > max(di, dj):
continue
elif max(di / dj, dj / di) > 3:
continue
else:
d = dx / ((di + dj) / 2.)
result[i, j] = result[j, i] = d
return result
def _decimate(dist_matrix, scores):
inds = np.arange(dist_matrix.shape[0])
while True:
if ~np.isfinite(dist_matrix).any():
break
best = np.nanargmin(dist_matrix)
i, j = np.unravel_index(best, dist_matrix.shape)
merge = i if scores[i] < scores[j] else j
inds = np.delete(inds, merge)
scores = np.delete(scores, merge)
dist_matrix = np.delete(np.delete(dist_matrix, merge, 0), merge, 1)
return inds
def merge_detections(detections):
locations, scores = zip(*detections)
scores = np.array(scores)
dist = _stamp_distances(locations)
result = _decimate(dist, scores)
return np.asarray(detections)[result]
def normalize(arr):
"""Flatten and L2-normalize an array, and return"""
arr = arr.ravel().astype(np.float)
n = np.sqrt((arr ** 2).sum())
return arr / n
ely, elx = np.mgrid[:40, :40]
def ellipse(x0, y0, a, b, dr, theta0):
"""Make a 40x40 pix image of an ellipse"""
r = np.hypot(elx - x0, ely - y0)
theta = np.arctan2(ely - y0, elx - x0) - np.radians(theta0)
r0 = a * b / np.hypot(a * np.cos(theta), b * np.sin(theta))
return np.exp(-np.log(r / r0) ** 2 / (dr / 10.) ** 2)
def _sample_and_scale(i4, mips, do_scale, limits, shp=(40, 40), i3=None):
mips = np.where(mips > 0, mips, np.nan)
i4 = resample(i4, shp)
mips = resample(mips, shp)
if i3 is not None:
i3 = resample(i3, shp)
assert i4.shape == shp, i4.shape
assert mips.shape == shp, mips.shape
mask = np.isfinite(mips)
if do_scale:
try:
i4 = scale(i4, limits=limits)
mips = scale(mips, mask, limits=limits)
mips[~mask] = 255
if i3 is not None:
i3 = scale(i3, mask, limits=[1, 99])
except ValueError:
#print 'Could not rescale images (bad pixels?)'
return
else:
mips[~mask] = np.nan
b = i3 if i3 is not None else i4 * 0
rgb = np.dstack((mips, i4, b))
return rgb
def _unpack(tree):
if isinstance(tree, np.ndarray):
return tree.ravel()
return np.hstack(_unpack(t) for t in tree)
def multiwavelet_from_rgb(rgb):
from scipy.fftpack import dct
from pywt import wavedec2
r = rgb[:, :, 0].astype(np.float)
g = rgb[:, :, 1].astype(np.float)
dctr = dct(r, norm='ortho').ravel()
dctg = dct(g, norm='ortho').ravel()
daubr = _unpack(wavedec2(r, 'db4'))
daubg = _unpack(wavedec2(g, 'db4'))
return np.hstack([dctr, dctg, daubr, daubg])
def overlap(l, b, r, l0, b0, r0):
overlap = np.zeros(l.size, dtype=np.bool)
for i in range(l0.size):
dl = np.abs(l - l0[i])
db = np.abs(b - b0[i])
dr = np.maximum(dl, db)
thresh = r + r0[i]
r_ratio = np.maximum(r / r0[i], r0[i] / r)
overlap |= ((dr < thresh) & (r_ratio < 5))
return overlap
def chunk(x, n):
"""
    Split a sequence into approximately n contiguous chunks
Parameters
----------
x : list-like
a sequence to extract. Must support len() and slicing
Outputs
-------
A list of approximately n slices of x. The length of the list
will always be <= n
"""
nx = len(x)
if n < 1 or n > nx:
raise ValueError("n must be >0, and <= %i: %i" % (n, nx))
chunksz = int(np.ceil(1. * nx / n))
return [x[i: i + chunksz] for i in range(0, nx, chunksz)]
def cloud_map(func, args, jobs=None, return_jobs=False,
**cloud_opts):
"""
Call cloud.map, with some standard logging info
Parameters
----------
func : function to map
args : list of mapping arguments
jobs : list of pre-existing job ids, or None
If present, will fetch the results from these jobs
return_jobs : boolean (optional, default false)
If True, return the job IDs instead of
the job results
cloud_opts : dict (optional)
Extra keyword arguments to pass to cloud.map
Returns
-------
Result of cloud.map if return_jobs=False, else the job ids
"""
import cloud
cloud_opts.setdefault('_env', 'mwp')
cloud_opts.setdefault('_type', 'c2')
cloud_opts.setdefault('_label', func.__name__)
if jobs is None:
log = logging.getLogger(func.__module__)
log.debug(
"Starting %i jobs on PiCloud for %s" % (len(args), func.__name__))
jobs = cloud.map(func, args, **cloud_opts)
log.debug("To re-fetch results, use \n"
"%s(jobs=range(%i, %i))" %
(func.__name__, min(jobs), max(jobs) + 1))
if return_jobs:
return jobs
return cloud.result(jobs)
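# Usage sketch for chunk() (pure Python; under Python 2, where this module
# lives, range() returns a plain list):
#
#   >>> chunk(range(10), 3)
#   [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]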
|
[
"beaumont@hawaii.edu"
] |
beaumont@hawaii.edu
|
d8fe887c75da4284e8c82abb54a0e1df679b17a1
|
7e06308b5575271bf8d7a9a6d1038b65d48f2917
|
/extension.py
|
7f48efe1aa6a7d9bb9588e0db70dde7a20d24586
|
[
"MIT"
] |
permissive
|
ComputerNetworks-UFRGS/ManP2P-ng
|
3befa44d3299c1de69019f49e3271d5c01f84a88
|
41257f46c11e30c6aa663c67c791044c04bbf4e0
|
refs/heads/master
| 2021-01-20T12:38:09.040382
| 2013-11-24T00:58:27
| 2013-11-24T00:58:27
| 14,405,761
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,144
|
py
|
# -*- coding: utf-8 -*-
from overlay import MessageNotHandledError
class ComponentManager:
modules = [ ]
@staticmethod
def load(data):
errors = [ ]
for d in data:
try:
m = getattr(
__import__('components.' + d['name'], fromlist=['*']),
d['class'])
except ImportError as e:
errors.append(
"Couldn't load module %s: %s" % (d['class'], str(e)))
else:
print ' > Loading %s' % (d['name'])
ComponentManager.modules.append(m)
if len(errors) > 0:
for e in errors:
print e
@staticmethod
def register(overlay):
print '-+- Registering modules -+-'
for m in ComponentManager.modules:
print ' > %s registered' % (m.getSubject())
overlay.addKindHandler(m.getSubject())
class PrivateMethodCallError(Exception):
pass
class BadMethodCallError(Exception):
pass
class ManagementComponent:
def __init__(self, transport=None):
self.transport = transport
@staticmethod
def getSubject():
raise NotImplementedError
def handle(self, message):
theCall = message.getBody().getAttribute("call")
try:
assert (theCall[:2] != '__')
getattr(self, theCall)(message)
except AssertionError:
raise PrivateMethodCallError, '''%s called through %s''' % (
theCall, message.toprettyxml())
except AttributeError as e:
raise AttributeError, '''%s called through %s''' % (
theCall, message.toprettyxml())
except RuntimeError as e:
raise e
except Exception as e:
raise BadMethodCallError, '''%s called through %s''' % (
theCall, message.toprettyxml())
class PluginManager:
plugins = { }
@staticmethod
def load(data):
errors = [ ]
for d in data:
try:
m = getattr(
__import__('plugins.' + d['name'], fromlist=['*']),
d['class'])
except ImportError as e:
errors.append(
"Couldn't load plug-in %s: %s" % (d['class'], str(e)))
else:
print ' > Loading %s' % (d['name'])
PluginManager.plugins[d['name']] = m
if len(errors) > 0:
for e in errors:
print e
class PlugIn:
pass
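# Usage sketch -- the `data` shape both loaders expect (module and class
# names here are illustrative, not real components of this repo):
#
#   ComponentManager.load([
#       {'name': 'ping', 'class': 'PingComponent'},
#   ])
#   PluginManager.load([
#       {'name': 'logger', 'class': 'LoggerPlugin'},
#   ])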
|
[
"fabiojrb@gmail.com"
] |
fabiojrb@gmail.com
|
09e7fc498c8663d84e8683b399ecfb42c89177b5
|
b290171490c849a5248396a2ee980a42815935c1
|
/delete_wo_uv.py
|
5cc11994ede008bc761f574f645d30d3226549b8
|
[] |
no_license
|
jiangzhongshi/BlenderScripts
|
787f12cc37e8d71b03bc68b9876733ee184c5f76
|
835ccf9a90739e967989b7b9620cc692e545f85e
|
refs/heads/master
| 2021-01-25T06:36:10.874329
| 2020-05-20T21:01:32
| 2020-05-20T21:01:32
| 93,593,725
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
import re
def check_uv(obj):
    with open(obj) as f:
        for l in f:
            if l[0] == 'f':
                slash = l.find('/')
                if slash != -1 and l[slash+1] != '/':
                    return True  # UV exists, leave untouched
                else:
                    return False
    return False  # no face lines at all: treat as having no UVs
if __name__ == '__main__':
import sys, glob, os
d = sys.argv[1]
all_files = glob.glob(d+'/*.[Oo][Bb][Jj]')
for obj in all_files:
if not check_uv(obj):
os.remove(obj)
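# Usage sketch: python delete_wo_uv.py /path/to/meshes
# removes every .obj in that directory whose faces carry no UV indices.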
|
[
"jiangzs@nyu.edu"
] |
jiangzs@nyu.edu
|
8dcecbb2db91f0781c70434f88392b4d940ba544
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/ankiandroid/testcase/firstcases/testcase4_014.py
|
be1af9489a3d62454bebe7063c442ec02f4fe4d7
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,621
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.ichi2.anki',
'appActivity' : 'com.ichi2.anki.IntentHandler',
'resetKeyboard' : True,
'androidCoverage' : 'com.ichi2.anki/com.ichi2.anki.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase014
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"com.ichi2.anki:id/action_sync\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"You must log in to a third party account to use the cloud sync service. You can create one in the next step.\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"You must log in to a third party account to use the cloud sync service. You can create one in the next step.\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"You must log in to a third party account to use the cloud sync service. You can create one in the next step.\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"You must log in to a third party account to use the cloud sync service. You can create one in the next step.\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Cancel\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Default\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"0\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"0\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.ichi2.anki:id/action_sync\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"Custom study session\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"Options\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Define custom steps\")", "new UiSelector().className(\"android.widget.TextView\").instance(11)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\" rated:1:1\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/edit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys(" rated:1:1");
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/edit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("99999");
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/edit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("0");
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/edit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("1");
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"4_014\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'com.ichi2.anki'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
2842a61d2bc8bd155811ada5e647135f21f9b544
|
b482b2b957fedbe331a957b12f2ec5b42b6983f6
|
/code/06/assembler/SymbolTable.py
|
d2ffb61d958d101c107d0fac39898296a2b7429f
|
[] |
no_license
|
avidsapp/nand2tetris
|
4d735f6d3c137055c6c9dc31d0489492a174cbaf
|
f93c8731dc8e65d0d09653cf72ce0fd56e4e7391
|
refs/heads/master
| 2021-06-21T14:15:07.672570
| 2017-08-14T16:10:47
| 2017-08-14T16:10:47
| 100,281,938
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
#!~/.pyenv/shims/python3
# User specific shebang line. Change to your Python interpreter dir
"""
SymbolTable.py is a hash-table store used to store and resolve symbols with their associated addresses
"""
class SymbolTable(dict):
def __init__(self):
super().__init__()
self.update({ #pre-defined symbols and memory locations
'SP': 0,
'LCL': 1,
'ARG': 2,
'THIS': 3,
'THAT': 4,
'R0': 0,
'R1': 1,
'R2': 2,
'R3': 3,
'R4': 4,
'R5': 5,
'R6': 6,
'R7': 7,
'R8': 8,
'R9': 9,
'R10': 10,
'R11': 11,
'R12': 12,
'R13': 13,
'R14': 14,
'R15': 15,
'SCREEN': 0x4000,
'KBD': 0x6000
})
def add_entry(self, symbol, address): # add symbol to symbol table
self[symbol] = address
def contains(self, symbol): # check if symbol is in the symbol table
return symbol in self
    def get_address(self, symbol): # get memory/instruction address of a symbol in the symbol table
return self[symbol]
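# Minimal self-test sketch (runs only when this file is executed directly):
if __name__ == '__main__':
    table = SymbolTable()
    table.add_entry('LOOP', 16)
    assert table.contains('LOOP')
    assert table.get_address('SCREEN') == 0x4000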
|
[
"david@mobius.digital"
] |
david@mobius.digital
|
a1dc42cf0d23cc2c38aa40141b882672f825d98e
|
0e02ddbeec803067bc86b4675f2ff8ff2d388e62
|
/people/urls.py
|
d33d1425dfff5e46007582aec9f7bd16b5f13180
|
[] |
no_license
|
BruceMWhealton/familytree
|
4ef6a8cce5ebf23a89b2e05a915d33a13c9b5b5e
|
c8e8fe0278615a9803c2c89c13e8ea8f5804ddee
|
refs/heads/master
| 2021-08-28T10:46:33.482088
| 2017-12-12T01:17:57
| 2017-12-12T01:17:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,337
|
py
|
from django.conf.urls import url
from people import views
urlpatterns = [
url(r'^$', views.index, name='people.index'),
url(r'^person/(?P<person_id>\d+)/$', views.person, name='person'),
url(r'^person/(?P<person_id>\d+)/edit/$', views.edit_person, name='edit_person'),
url(r'^person/(?P<person_id>\d+)/relatives/$', views.relatives, name='relatives'),
url(r'^person/(?P<person_id>\d+)/relatives/map/$', views.relatives_map, name='relatives_map'),
url(r'^person/(?P<person_id>\d+)/descendants/$', views.descendants, name='descendants'),
url(r'^person/(?P<person_id>\d+)/descendants/map/$', views.descendants_map, name='descendants_map'),
url(r'^person/(?P<person_id>\d+)/descendants/tree/$', views.descendants_tree, name='descendants_tree'),
url(r'^person/(?P<person_id>\d+)/descendants/tree/svg/$', views.descendants_tree_svg, name='descendants_tree_svg'),
url(r'^person/(?P<person_id>\d+)/ancestors/$', views.ancestors, name='ancestors'),
url(r'^person/(?P<person_id>\d+)/ancestors/report/$', views.ancestors_report, name='report'),
url(r'^person/(?P<person_id>\d+)/ancestors/report/undead/$',
views.ancestors_report_undead,
name='report_undead'),
url(r'^person/(?P<person_id>\d+)/ancestors/report/maiden-names/$',
views.ancestors_report_maiden_names,
name='report_maiden_names'),
url(r'^report/alive/(?P<year>\d+)/$', views.alive_in_year, name='alive_in_year'),
url(r'^person/(?P<person_id>\d+)/ancestors/map/$', views.ancestors_map, name='ancestors_map'),
url(r'^person/(?P<person_id>\d+)/ancestors/ringchart/$', views.ring_chart, name='ring_chart'),
url(r'^person/(?P<person_id>\d+)/ancestors/ringchart/svg/$', views.ring_chart_svg, name='ring_chart_svg'),
url(r'^location/(?P<location_id>\d+)/$', views.location, name='location'),
url(r'^region/(?P<region_name>[\w\W]+)/$', views.region, name='region'),
url(r'^surname/(?P<surname>[\w\W]+)/$', views.surname, name='surname'),
url(r'^forename/(?P<forename>[\w\W]+)/$', views.forename, name='forename'),
url(r'^tag/(?P<slug>[\w-]+)/$', views.tag, name='tag'),
url(r'^person/add/$', views.add_person, name='add_person'),
url(r'^location/add/$', views.add_location, name='add_location'),
url(r'^public/surnames/$', views.surnames, name='surnames'),
]
|
[
"dan@uncommons.org"
] |
dan@uncommons.org
|
ce641dbede6f04804b41a0a8460de2268bda2a1e
|
b87f66b13293782321e20c39aebc05defd8d4b48
|
/maps/build/TraitsGUI/enthought/pyface/key_pressed_event.py
|
0072c81aca8d415f585dffb9111cb439544649df
|
[
"BSD-3-Clause"
] |
permissive
|
m-elhussieny/code
|
5eae020932d935e4d724c2f3d16126a0d42ebf04
|
5466f5858dbd2f1f082fa0d7417b57c8fb068fad
|
refs/heads/master
| 2021-06-13T18:47:08.700053
| 2016-11-01T05:51:06
| 2016-11-01T05:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
""" The event that is generated when a key is pressed. """
# Enthought library imports.
from enthought.traits.api import Bool, HasTraits, Int, Any
class KeyPressedEvent(HasTraits):
""" The event that is generated when a key is pressed. """
#### 'KeyPressedEvent' interface ##########################################
# Is the alt key down?
alt_down = Bool
# Is the control key down?
control_down = Bool
# Is the shift key down?
shift_down = Bool
# The keycode.
key_code = Int
# The original toolkit specific event.
event = Any
#### EOF ######################################################################
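# Usage sketch (requires the Enthought Traits package; key_code 65 is
# ASCII 'A'):
#
#   event = KeyPressedEvent(alt_down=False, control_down=True,
#                           shift_down=False, key_code=65)
#   if event.control_down and event.key_code == 65:
#       ...  # handle Ctrl+A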
|
[
"fspaolo@gmail.com"
] |
fspaolo@gmail.com
|
6e4847b3cbbfd143ca52188e94d598173a3e3fd7
|
31379309fedf19f04b307dab141a969ea38ebe9b
|
/button.py
|
596dd06a7ed83e216eab138dcbdb90c9443a7a85
|
[] |
no_license
|
Alexandros-Panagiotopoulos/alien-invasion
|
b3c3496323946c8774fcdd90bcc5e17a8fb253fe
|
a6728d8022b3305a4f4019ffccaefea2f079fc41
|
refs/heads/master
| 2020-03-22T14:31:45.919567
| 2018-07-15T04:28:33
| 2018-07-15T04:28:33
| 140,186,211
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
import pygame.font
class Button():
def __init__(self, ai_settings, screen, msg):
"""Initialize buttons attributes"""
self.screen = screen
self.ai_settings = ai_settings
self.screen_rect = screen.get_rect()
# Set the dimensions and properties of the button
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
# Build the button's rect object and center it
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
        # The button message needs to be prepped only once
self.prep_msg(msg)
    def prep_msg(self, msg):
        """Turn msg into a rendered image and center the text on the button"""
self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_button(self):
# Draw blank button and then draw message
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect)
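# Usage sketch (inside an existing game loop; ai_settings and screen come
# from the game's own setup code):
#
#   play_button = Button(ai_settings, screen, "Play")
#   ...
#   play_button.draw_button()
#   pygame.display.flip()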
|
[
"panagiotopoulos.alexandros@gmail.com"
] |
panagiotopoulos.alexandros@gmail.com
|
7e5e4f719b75a501b9e069ca581e0344b89df260
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/slxos/v17r_1_01a/brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp/input/__init__.py
|
c69003159bbe860e7c9e51d4eb0d278fd1ce133e
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226
| 2018-04-03T20:09:50
| 2018-04-03T20:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,711
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/clear-mpls-auto-bandwidth-statistics-lsp/input. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_name',)
_yang_name = 'input'
_rest_name = 'input'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__lsp_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'clear-mpls-auto-bandwidth-statistics-lsp', u'input']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'clear-mpls-auto-bandwidth-statistics-lsp', u'input']
def _get_lsp_name(self):
"""
Getter method for lsp_name, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp/input/lsp_name (string)
YANG Description: LSP Name
"""
return self.__lsp_name
def _set_lsp_name(self, v, load=False):
"""
Setter method for lsp_name, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp/input/lsp_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_name() directly.
YANG Description: LSP Name
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
})
self.__lsp_name = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_name(self):
self.__lsp_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
lsp_name = __builtin__.property(_get_lsp_name, _set_lsp_name)
_pyangbind_elements = {'lsp_name': lsp_name, }
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
0653e3849f60d88cb438d1f81e12055160057477
|
c71bdf7d24207c2a39c3ad7be3ede1043be81569
|
/tensorflow/dataset/datasetbase.py
|
56ca81ab1e6b73dfceac8ee6992620267f6f7a45
|
[
"MIT"
] |
permissive
|
Red-Eyed/dnn_models
|
72056b621e7d6e186c6993edfcdfa951e985b9b9
|
66bee227e66e5432cf5ddecdc6cd18f5dcf372ba
|
refs/heads/master
| 2020-04-10T09:55:17.578256
| 2019-09-10T09:56:58
| 2019-09-10T09:56:58
| 160,950,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
from abc import ABC, abstractmethod
class DatasetBase(ABC):
def __init__(self, batch_size, prefetch_size, shuffle_size):
self.batch_size = batch_size
self.prefetch_size = int(prefetch_size)
self.shuffle_size = int(shuffle_size)
self.train_init_op = None
self.test_init_op = None
self.x = None
self.y = None
self.train_size = None
self.test_size = None
@abstractmethod
def show(self):
raise NotImplementedError
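# Subclassing sketch (show() is the only abstract method to implement):
if __name__ == '__main__':
    class ToyDataset(DatasetBase):
        def show(self):
            print(self.batch_size, self.prefetch_size, self.shuffle_size)

    ToyDataset(batch_size=32, prefetch_size=2, shuffle_size=1000).show()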
|
[
"vadim.stupakov@gmail.com"
] |
vadim.stupakov@gmail.com
|
d3d78b6323a240c91879de1759e6bfd3665bf0bc
|
fd4d6d898a7c2114ca214657bc5ed448540859c6
|
/18. Day 18/1/main.py
|
0fa2b58931ca44a815e90eac1dcdbd2d322fee2b
|
[] |
no_license
|
TonyBiz9999/python
|
ecccf30966a3370b73b1cf9b84c0ff48fd34b790
|
983d9f7410fa0f15a1428191210bf8d8dbcc6ac8
|
refs/heads/master
| 2023-08-01T16:48:09.192126
| 2021-09-10T13:51:52
| 2021-09-10T13:51:52
| 403,551,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
from turtle import Turtle, Screen
import turtle as t
import random
# timmy = Turtle()
# for i in range (0,4):
# timmy.forward(100)
# timmy.right(90)
t.colormode(255)
arrow = Turtle()
# for i in range(15):
# arrow.forward(10)
# arrow.penup()
# arrow.forward(10)
# arrow.pendown()
color_list = ["orange red", "indigo", "magenta", "deep sky blue"]
# def draw_shape(side_number):
#     d_degree = 360 / side_number
#     for _ in range(side_number):
#         arrow.forward(100)
#         arrow.right(d_degree)
# for choice in range(3, 8):
#     arrow.color(random.choice(color_list))
#     draw_shape(choice)
def random_color():
    r = random.randint(0, 255)
    g = random.randint(0, 255)
    b = random.randint(0, 255)
    ran_color = (r, g, b)
    return ran_color
# d_degree =[0,90,180,270]
#
# for _ in range(100):
# arrow.pencolor(random_color())
# arrow.forward(30)
# a = random.choice(d_degree)
# arrow.setheading(a)
def set_circle(choice):
global arrow
for _ in range(int(360/choice)):
arrow.circle(100)
        arrow.color(random_color())
arrow.setheading(arrow.heading()+choice)
set_circle(5)
screen = Screen()
screen.exitonclick()
|
[
"tienbui.media@gmail.com"
] |
tienbui.media@gmail.com
|
4ea1a680bf746e4095f60102406d9cd4862f5d90
|
8794c1e2c37c91b6fc84ad9dade98ce3e01975dd
|
/XML_Database.py
|
416fa1a590ad4c933db130bd1b507bcc48f2725b
|
[] |
no_license
|
LanHikari22/GModDB
|
818ce2a23f7c796d079ca7f1191f3e319fb809a6
|
6b1bad21965793b7f59ad1f971d6f2ba783db4d0
|
refs/heads/master
| 2021-01-23T12:43:43.599137
| 2017-06-09T13:49:27
| 2017-06-09T13:49:27
| 93,187,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
class Database:
def __init__(self):
pass
|
[
"lanhikarixx@gmail.com"
] |
lanhikarixx@gmail.com
|
0208d8c9105a00b94dcccc70689221f0e0f20db1
|
7d7e1993e18dfd0ece507629e4cc13092a20d718
|
/hello.py
|
806d56aac9bae17d44f0521e3802737eea0947fb
|
[] |
no_license
|
thiswind-inclass-example/hello_echarts
|
a1df604609f0549a670eb015941592f312e4dc69
|
c6de8090394e17cb73b4f700f9f7361b9270ada0
|
refs/heads/main
| 2023-06-04T00:48:19.337261
| 2021-06-25T10:15:15
| 2021-06-25T10:15:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
from flask import Flask, render_template
from pyecharts.charts import Bar
import pyecharts
app = Flask(__name__)
@app.route('/')
def index():
    page_content = """<html>
    <head>
    <title>Hello Echarts</title>
    </head>
    <body>
    <p>
    <a href='/1'>View 1</a>
    </p>
    <p>
    <a href='/10'>View 10</a>
    </p>
    <p>
    <a href='/100'>View 100</a>
    </p>
    <p>
    <a href='/1000'>View 1000</a>
    </p>
    <p>
    <a href='/10000'>View 10000</a>
    </p>
    </body>
    </html>
    """
    return page_content
@app.route('/<number>')
def hello(number):
bar = Bar()
    bar.add_xaxis(["Shirt", "Wool sweater", "Chiffon blouse", "Trousers", "High heels", "Socks"])
    bar.add_yaxis("Merchant A", [5, 20, 36, 10, 75, int(number)])
bar.render('templates/index.html')
return render_template('index.html')
if __name__ == '__main__':
app.run()
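# Usage sketch: run this file, then browse to http://127.0.0.1:5000/ and
# follow a link such as /100; the chart is re-rendered with that value.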
|
[
"thiswind@gmail.com"
] |
thiswind@gmail.com
|
d94eac8d709bc2eb7d4ac8bf5765e1247c5dc9c7
|
67ceb35320d3d02867350bc6d460ae391e0324e8
|
/practice/easy/0231-Power_of_Two.py
|
2937bb2cf84cca61705c0d77d1f846cbe4ef3766
|
[] |
no_license
|
mattjp/leetcode
|
fb11cf6016aef46843eaf0b55314e88ccd87c91a
|
88ccd910dfdb0e6ca6a70fa2d37906c31f4b3d70
|
refs/heads/master
| 2023-01-22T20:40:48.104388
| 2022-12-26T22:03:02
| 2022-12-26T22:03:02
| 184,347,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        # halve n while it stays even; a power of two reduces to exactly 1
        while n > 0 and n % 2 == 0:
            n //= 2  # integer division avoids drifting into floats
        return n == 1
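# Note: an equivalent constant-time check exploits the fact that a power
# of two has a single set bit, so n & (n - 1) clears it to zero:
#
#   return n > 0 and n & (n - 1) == 0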
|
[
"noreply@github.com"
] |
noreply@github.com
|
b66a4a2f642675fb2d503b1ba324ad2dfb908ba2
|
6f872bc6ee101f165c3dbf19e3e64283b4d4e391
|
/flaskr/flaskr.py
|
024259837e66338450c6f12d1a81b250ebec8540
|
[] |
no_license
|
zsy23/CMU_15_441_P1_WebServer
|
d088a762ae293d157d4998e942b7f7238a1893e7
|
e589d15bcbbf1cc56c99b22d73633780fdaa4237
|
refs/heads/master
| 2020-06-12T23:53:18.199418
| 2017-01-28T06:24:08
| 2017-01-28T06:24:08
| 75,475,379
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,329
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Flaskr
~~~~~~
A microblog example application written as Flask tutorial with
Flask and sqlite3.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os, sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
# create our little application :)
app = Flask(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'flaskr.db'),
DEBUG=True,
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def init_db():
"""Initializes the database."""
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb')
def initdb_command():
"""Creates the database tables."""
init_db()
print('Initialized the database.')
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/')
def show_entries():
db = get_db()
cur = db.execute('select title, text from entries order by id desc')
entries = cur.fetchall()
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
db = get_db()
db.execute('insert into entries (title, text) values (?, ?)',
[request.form['title'], request.form['text']])
db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
# From PEP 333: http://www.python.org/dev/peps/pep-0333/
############### BEGIN WSGI WRAPPER ##############
def run_with_cgi(application):
environ = dict(os.environ.items())
environ['wsgi.input'] = sys.stdin
environ['wsgi.errors'] = sys.stderr
environ['wsgi.version'] = (1, 0)
environ['wsgi.multithread'] = False
environ['wsgi.multiprocess'] = True
environ['wsgi.run_once'] = True
if environ.get('HTTPS', 'on') in ('on', '1'):
environ['wsgi.url_scheme'] = 'https'
else:
environ['wsgi.url_scheme'] = 'http'
headers_set = []
headers_sent = []
def write(data):
if not headers_set:
raise AssertionError("write() before start_response()")
elif not headers_sent:
# Before the first output, send the stored headers
status, response_headers = headers_sent[:] = headers_set
http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.1')
http_connection = environ.get('HTTP_CONNECTION','close')
sys.stdout.write('%s %s\r\n' % (http_version, status))
sys.stdout.write('Connection: %s\r\n' % (http_connection))
for header in response_headers:
sys.stdout.write('%s: %s\r\n' % header)
sys.stdout.write('\r\n')
sys.stdout.write(data)
sys.stdout.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif headers_set:
raise AssertionError("Headers already set!")
headers_set[:] = [status, response_headers]
return write
result = application(environ, start_response)
try:
for data in result:
if data: # don't send headers until body appears
write(data)
if not headers_sent:
write('') # send headers now if body was empty
finally:
if hasattr(result, 'close'):
result.close()
############### END WSGI WRAPPER ##############
if __name__ == '__main__':
run_with_cgi(app)
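# Usage sketch: with the Flask CLI, set FLASK_APP=flaskr/flaskr.py and run
# `flask initdb` to create the schema; the CGI wrapper above then serves
# the app when this script is invoked by a web server.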
|
[
"1181856726@qq.com"
] |
1181856726@qq.com
|
ecd0c908b212976fa19ea77f224a14198775ffde
|
6d86f4c4af6a1619ca651439437f71d7b77d23bd
|
/137_single-number-ii.py
|
ceb436c309bc077bcc559e641e9aa5ba5e70bd21
|
[] |
no_license
|
kooshine/LeetCode
|
653e7bc9254ef3000b65d2929bf522d19a0deea6
|
14b0e09230620824cd1a09c015c29f2dfea932cf
|
refs/heads/master
| 2020-11-29T22:29:34.808235
| 2020-05-25T13:54:27
| 2020-05-25T13:54:27
| 230,230,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78
|
py
|
# -*- coding: utf-8 -*-
#https://leetcode-cn.com/problems/single-number-ii/
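# One common O(n)-time, O(1)-space approach (a sketch -- the original file
# left the solution blank): count each bit modulo 3 with two registers.
class Solution:
    def singleNumber(self, nums):
        ones, twos = 0, 0
        for n in nums:
            ones = (ones ^ n) & ~twos  # bits seen once (and not twice)
            twos = (twos ^ n) & ~ones  # bits seen twice
        return ones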
|
[
"18919027903@163.com"
] |
18919027903@163.com
|
f56a99f41b31f61b5cda6eaf7c5801aafb70c027
|
f2f2e1af1d5fcf33aad4e5b3637d9ed7cdf32d40
|
/VentInfo.net/home/nick/csc344/ventcomments/get-comments.py
|
4378a46416bb3e01a14ea95aca36580bab8ad199
|
[] |
no_license
|
KWMalik/CSC344-A3-Python
|
d4b43b44500ed37627d5646d92d9c4ad848d2309
|
671401b35ff44300223947d6904521a089c690d9
|
refs/heads/master
| 2020-12-25T13:07:57.152119
| 2012-05-17T14:22:08
| 2012-05-17T14:22:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
#!/usr/bin/python
####################################################
# Nicholas Poorman - 2010
# This program will parse the information
# returned from the ventrilo_status program
# and persist it into the sqlite3 databse file
#
# Be sure to set the rootdir of the comments.db file
#
####################################################
import re
import subprocess
from datetime import datetime
import sqlite3
rootdir = '/home/nick/csc344/ventcomments/'
conn = sqlite3.connect(rootdir+'comments.db', detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
conn.row_factory = sqlite3.Row
c = conn.cursor()
rg = re.compile('(?:CLIENT:).*?NAME=(.*?),COMM=(.{1,127})',re.IGNORECASE)
p = subprocess.Popen(rootdir+'ventrilo_status -c2 -tvent.ifgnet.net:32162', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in p.stdout.readlines():
m = rg.search(line)
if m:
user=m.group(1)
comment=m.group(2)
print("test")
item = c.execute('select * from comments where comment_user=? and comment_text=?', (user, comment)).fetchone()
if item is None:
c.execute('insert into comments (comment_id, comment_user, comment_text, comment_date) values(NULL, ?, ?, ?)', (user, comment, datetime.now()))
conn.commit()
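# Assumed schema for comments.db, inferred from the queries above:
#
#   CREATE TABLE comments (
#       comment_id   INTEGER PRIMARY KEY AUTOINCREMENT,
#       comment_user TEXT,
#       comment_text TEXT,
#       comment_date TIMESTAMP
#   );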
|
[
"nickpoorman@gmail.com"
] |
nickpoorman@gmail.com
|
e32fbfc28aaebe4651e0b4db22c3c3e34a56302e
|
82e021f8de203648418ef164f57570e11fe93f11
|
/test.py
|
103a9d85894016738288eb3355a24b64a15fe0fa
|
[] |
no_license
|
ivanatome/upi_ivanatome
|
6b6135de6c118e6f88901f1cf779ef93fa0e7f7a
|
91a1ce3144d1d4985569b78b74545f6ac8651aee
|
refs/heads/master
| 2020-09-17T05:31:04.560376
| 2020-01-30T11:01:10
| 2020-01-30T11:01:10
| 224,006,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,187
|
py
|
import unittest
from korisnici import Korisnici
import os, sys, sqlite3, datetime
from baza_metode import DRUGA, TRECA, MOJIPOSTOVI, BRISANJE, KOMENTIRANJE, LAJK, PRIKAZKOMENTARA, NASLOVNICAPOREDAKLAJK, NASLOVNICAPOREDAKKOM, MOJPROFILadd, MOJPROFIL, NASLOVNICA, AZURIRANJE, FILTER
dirname = os.path.dirname(sys.argv[0])
con=sqlite3.connect('data\\drustvenamreza.db')
cur=con.cursor()
class TestKorisnici(unittest.TestCase):
def test_init_value_error_brojznakova(self):
        # username has fewer than 5 characters
        with self.assertRaises(ValueError):  # 5th argument (the username)
Korisnici("","","","","abc","","","","","","","","")
def test_init_value_error_znakmail(self):
        # invalid email: it must contain the @ sign
with self.assertRaises(ValueError):
Korisnici("","","","tm@","abc","","","","","","","","")
def test_init_value_error_kratkalozinka(self):
        # password shorter than 4 characters
with self.assertRaises(ValueError):
Korisnici("","","","","","955","","","","","","","")
def test_baza_komentar(self):
KOMENTIRANJE(1002,"nekikomentar",502)
con=sqlite3.connect('data\\drustvenamreza.db')
cur=con.cursor()
idd=cur.execute('SELECT * FROM sqlite_sequence').fetchall()
seq=idd[2][1]
print("SEKVENCA ",seq)
row=cur.execute('SELECT * FROM komentar WHERE id_komentara=(?)',(seq,)).fetchone()
print(row)
self.assertEqual(row,(seq,'nekikomentar', 1002, 502))
cur.execute('DELETE FROM komentar WHERE id_komentara=(?)',(seq,))
con.commit()
def test_baza_objava(self):
idObjave=MOJPROFIL(500,"objava","20:45h",5,11)
con=sqlite3.connect('data\\drustvenamreza.db')
cur=con.cursor()
v=cur.execute('SELECT * from objava where id_objave=(?)',(idObjave,)).fetchone()
self.assertEqual(v,(idObjave, 500, 'objava', '20:45h', 5, 11))
con=sqlite3.connect('data\\drustvenamreza.db')
cur=con.cursor()
cur.execute('DELETE from objava WHERE id_objave=(?)',(idObjave,))
con.commit()
if __name__ == '__main__':
    unittest.main()
|
[
"ivanaradalj1234@gmail.com"
] |
ivanaradalj1234@gmail.com
|
61d0306f57193f18fb78a7a65113f9032dece319
|
62b4b732a3af0e9df01f310279381edd0dfb49a8
|
/src/webapps/HyperMap/views.py
|
834f12e483def59a071f21f013ddfd98e92451c7
|
[] |
no_license
|
JiawenPengV/HyperMap
|
b51c8f432cbb7c20c45873a05e7295c1a1c18c3b
|
3f55390e21ef7da45ff8f409f1a9fe6c0d88b9ed
|
refs/heads/master
| 2021-09-01T00:30:27.432447
| 2017-12-23T21:09:16
| 2017-12-23T21:09:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,475
|
py
|
from HyperMap.models import *
from HyperMap.forms import *
from HyperMap.serializers import *
from datetime import datetime
from rest_framework import status
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser, MultiPartParser
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.views import exception_handler
from rest_framework.decorators import authentication_classes, permission_classes
from rest_framework.authtoken.views import obtain_auth_token
import json
from rest_framework.request import Request
from django.utils.functional import SimpleLazyObject
from django.contrib.auth.middleware import get_user
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core import serializers
from django.db.models import Count, Case, When, Value, IntegerField
import random
from django.db.models import Q
import time
import pytz
import operator
# Create your views here.
def home(request):
return redirect('/main/')#return render(request, 'index.html', {})
#@api_view(["GET"])
#@authentication_classes((TokenAuthentication,))
#@permission_classes((IsAuthenticated,))
def react_render(request):
return render(request, 'index.html', {})
#@api_view(["GET", "POST"])
@csrf_exempt
def register(request):
#if request.user.is_authenticated():
# return redirect('/main/')
if request.method == 'GET':
return render(request, 'index.html', {})
elif request.method == 'POST':
data = JSONParser().parse(request)
        serializer = UserSerializer(data=data)
# print data
#print(serializer)
if serializer.is_valid():
serializer.save()
new_user = User.objects.get(username=serializer.data['username'])
token = default_token_generator.make_token(new_user)
email_body = """
Welcome to HyperMap. Please click the link below to verify your email address and complete the registration of your account:
http://%s%s
""" % (request.get_host(), reverse('confirm_registration', kwargs={'id':new_user.id, 'token':token}))
send_mail(subject="[HyperMap]Verify your email address", message=email_body, from_email="admin@hypermap.com", recipient_list=[new_user.email])
return JsonResponse({'success': 'You register account successfully!'}, status=201)
return JsonResponse(serializer.errors, status=400)
def confirm_registration(request, id, token):
try:
user = User.objects.get(id=id)
except ObjectDoesNotExist:
return redirect("/error/")# JsonResponse({'errors': 'User does not exist!'}, status=400)
if user != None and default_token_generator.check_token(user, token):
user.is_active = True
user.save()
return redirect("/main/")
else:
return redirect("/error/")# JsonResponse({'errors': 'Wrong confirmation link!'}, status=400)
@api_view(["GET","POST"])
@csrf_exempt
def user_login(request):
if request.method == "GET":
return render(request, 'index.html', {})
data = JSONParser().parse(request)
#if not "username" in data or not "password" in data:
# return JsonResponse({'errors': 'Please enter username and password!'}, status=400)
if not data["username"] or not data["password"]:
return JsonResponse({'error': 'Please enter username and password!'}, status=400)
try:
user = User.objects.get(username=data["username"])
except ObjectDoesNotExist:
return JsonResponse({'error': 'User does not exist!'}, status=400)
if not user.check_password(data["password"]):
return JsonResponse({'error': 'Wrong password!'}, status=403)
if not user.is_active:
return JsonResponse({'error': 'Need confirmation!'}, status=403)
token, created = Token.objects.get_or_create(user=user)
return JsonResponse({'token': token.key}, status=200)
@api_view(["GET"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_all_user(request):
#print(request.user)
users = User.objects.exclude(is_active=False).exclude(username=request.user.username).values('username', 'id')
user_list = list(users)
return JsonResponse({'users': user_list}, status=200)
@api_view(["GET"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_user_info(request):
serializer = UserSerializer(request.user)
return Response(serializer.data)
@api_view(["GET"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_user_profile(request):
profile = Profile.objects.get(user=request.user)
serializer = ProfileSerializer(profile)
return Response(serializer.data)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def edit_profile(request):
#data = JSONParser().parse(request)
profile = Profile.objects.get(user=request.user)
serializer = ProfileSerializer(profile, data=request.data, partial=True)
# print(serializer)
if serializer.is_valid():
serializer.save()
return JsonResponse({'success': 'Profile is edited successfully!'}, status=202)
# print(serializer.errors)
return JsonResponse(serializer.errors, status=400)
@api_view(["GET"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_recommendation_events(request):
profile = Profile.objects.get(user=request.user)
# print(len(profile.footprint.all()))
if len(profile.footprint.all()) == 0:
events = Event.objects.exclude(creator=request.user.id).exclude(session_end__lt=datetime.now()).annotate(num_registered=Count('registered')).order_by('-num_registered').values('id','name','category')[:5]
event_list = list(events)
else:
rec = {}
for registered_event in profile.footprint.all():
registered_users = registered_event.registered.exclude(username=request.user.username)
for user in registered_users:
registered_profile = Profile.objects.get(user=user)
events = registered_profile.footprint.exclude(pk__in=profile.footprint.all().values('id')).exclude(session_end__lt=datetime.now())
for event in events:
if event.pk in rec:
rec[event.pk] += 1
else:
rec[event.pk] = 1
sorted_rec = sorted(rec.items(), key=operator.itemgetter(1), reverse=True)[:5]
        rec_list = []
        for event_pk, _count in sorted_rec:
            rec_list.append(event_pk)
events = Event.objects.filter(pk__in=rec_list).values('id','name','category')
event_list = list(events)
if len(event_list) < 5:
more_events = Event.objects.exclude(pk__in=profile.footprint.all().values('id')).exclude(pk__in=events.values('id')).exclude(creator=request.user.id).exclude(session_end__lt=datetime.now()).annotate(num_registered=Count('registered')).order_by('-num_registered').values('id','name','category')[:5-len(event_list)]
for event in list(more_events):
event_list.append(event)
for event in event_list:
event['image'] = Event.objects.get(id=event['id']).image.url
return JsonResponse({'events': event_list}, status=200)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def create_event(request):
serializer = EventSerializer(data=request.data, context=request)
# print(serializer)
if serializer.is_valid():
serializer.save()
return JsonResponse({'success': 'Event is created successfully!'}, status=201)
# print(serializer.errors)
return JsonResponse(serializer.errors, status=400)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_event(request):
data = JSONParser().parse(request)
try:
event = Event.objects.get(pk=data['id'])
except ObjectDoesNotExist:
return JsonResponse({'error': 'Event does not exist or has already been deleted!'}, status=400)
event = Event.objects.filter(pk=data['id']).values('id', 'name', 'content', 'category', 'session_begin', 'session_end', 'position',
'lat', 'lng', 'creator')
event = list(event)[0]
profile = Profile.objects.get(user=request.user)
if request.user.id == event['creator']:
unconfirmed = list(Event.objects.get(id=event["id"]).unconfirmed.all().values('id','username','first_name','last_name'))
event["unconfirmed"] = unconfirmed
registered = list(Event.objects.get(id=event["id"]).registered.all().values('id','username','first_name','last_name'))
event["registered"] = registered
else:
if profile.footprint.filter(pk=event['id']):
event["isRegistered"] = True
else:
event["isRegistered"] = False
if event['session_end'] < datetime.now():
event["isValid"] = False
else:
event["isValid"] = True
    creator = User.objects.get(id=event['creator'])
    event['creator_firstname'] = creator.first_name
    event['creator_lastname'] = creator.last_name
    event['creator'] = creator.username
event['present_user'] = request.user.username
return JsonResponse({'eventInfo': event}, status=200)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def register_event(request):
data = JSONParser().parse(request)
try:
event = Event.objects.get(pk=data['id'])
except ObjectDoesNotExist:
return JsonResponse({'error': 'Event does not exist or has already been deleted!'}, status=400)
data['registered'] = [request.user.id]
serializer = EventSerializer(event, data=data, partial=True)
# print(serializer)
if serializer.is_valid():
serializer.save()
# print(len(event.registered.all()))
if event.registered.filter(id=request.user.id).exists():
return JsonResponse({'success': 'You register this event successfully!'}, status=202)
else:
return JsonResponse({'success': 'You unregister this event successfully!'}, status=202)
# print(serializer.errors)
return JsonResponse(serializer.errors, status=400)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_filtered_events(request):
data = JSONParser().parse(request)
selectedlist = data["selectedlist"].split(',')
# transfer timestamp to datetime object
selecteddate = datetime.fromtimestamp(data["selecteddate"]).date()
# print(selecteddate)
events = Event.objects.filter(category__in=selectedlist, session_begin__date__lte=selecteddate, session_end__date__gte=selecteddate).values('id','name','category','lat','lng')
event_list = list(events)
for event in event_list:
event['image'] = Event.objects.get(id=event['id']).image.url
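        # Add a small random latitude offset (about ±0.0002 degrees),
        # presumably so co-located event markers do not overlap on the map.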
event['lat'] += (random.uniform(0, 1) - 0.5) / 2500
return JsonResponse({'events': event_list, 'user': request.user.id}, status=200)
@api_view(["GET"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_all_events(request):
events = Event.objects.annotate(num_registered=Count('registered')).annotate(
is_valid=Case(
When(session_end__gt=datetime.now(),
then=Value(1)),
default=Value(0),
output_field=IntegerField())).order_by('-is_valid','session_begin','-num_registered').values('id','name','category')
event_list = list(events)
for event in event_list:
event['image'] = Event.objects.get(id=event['id']).image.url
#print(event_lists)
return JsonResponse({'events': event_list}, status=200)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_registered_users(request):
data = JSONParser().parse(request)
try:
event = Event.objects.get(pk=data['id'])
except ObjectDoesNotExist:
return JsonResponse({'error': 'Event does not exist or has already been deleted!'}, status=400)
users = event.registered.all().values('first_name','last_name')
user_list = list(users)
return JsonResponse({'users': user_list}, status=200)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_group_users(request):
data = JSONParser().parse(request)
try:
group = Group.objects.get(name=data['id'])
except ObjectDoesNotExist:
return JsonResponse({'error': 'Group does not exist or has already been deleted!'}, status=400)
users = group.member.all().values('first_name','last_name')
user_list = list(users)
return JsonResponse({'users': user_list}, status=200)
@api_view(["GET"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_user_events(request):
profile = Profile.objects.get(user_id=request.user.id)
events = profile.posted_event.annotate(num_registered=Count('registered')).annotate(
is_valid=Case(
When(session_end__gt=datetime.now(),
then=Value(1)),
default=Value(0),
output_field=IntegerField())).order_by('-is_valid','session_begin','-num_registered').values('id', 'name', 'category')
event_list = list(events)
for event in event_list:
event["registered"] = len(Event.objects.get(id=event['id']).registered.all())
event["unconfirmed"] = len(Event.objects.get(id=event['id']).unconfirmed.all().values('id'))
event['image'] = Event.objects.get(id=event['id']).image.url
# print(event_lists)
return JsonResponse({'events': event_list}, status=200)
@api_view(["GET"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_footprint(request):
profile = Profile.objects.get(user=request.user)
events = profile.footprint.annotate(num_registered=Count('registered')).annotate(
is_valid=Case(
When(session_end__gt=datetime.now(),
then=Value(1)),
default=Value(0),
output_field=IntegerField())).order_by('-is_valid','session_begin','-num_registered').values('id', 'name', 'category')
event_list = list(events)
for event in event_list:
event['image'] = Event.objects.get(id=event['id']).image.url
return JsonResponse({'events': event_list}, status=200)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def create_group(request):
data = JSONParser().parse(request)
if Group.objects.filter(name=data['name']).exists():
return JsonResponse({'error': 'This group name has already been registered.'}, status=400)
data['creator'] = request.user.id
#print(data)
data['member'] = [request.user.id]
for member in data['members']:
data['member'].append(member['id'])
serializer = GroupSerializer(data=data)
#print(serializer)
if serializer.is_valid():
serializer.save()
return JsonResponse({'success': 'Group is created successfully!'}, status=201)
# print(serializer.errors)
return JsonResponse(serializer.errors, status=400)
@api_view(["GET"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def get_groups(request):
groups = Group.objects.filter(member__id__contains=request.user.id).values('name','creator')
group_list = list(groups)
for group in group_list:
group["number"] = len(Group.objects.get(name=group["name"]).member.all())
group["creator"] = User.objects.get(id=group["creator"]).username
if request.user.username == group["creator"]:
group["isCreator"] = True
else:
group["isCreator"] = False
return JsonResponse({'groups': group_list}, status=200)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def confirm_event(request):
data = JSONParser().parse(request)
try:
event = Event.objects.get(pk=data['id'])
except ObjectDoesNotExist:
return JsonResponse({'error': 'Event does not exist or has already been deleted!'}, status=400)
# data['unconfirmed'] = [request.user.id]
data['unconfirmed'] = [data['user']]
serializer = EventSerializer(event, data=data, partial=True)
if serializer.is_valid():
serializer.save()
return JsonResponse({'success': 'This user is confirmed successfully!'}, status=202)
# print(serializer.errors)
return JsonResponse(serializer.errors, status=400)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def withdraw_group(request):
data = JSONParser().parse(request)
try:
group = Group.objects.get(name=data['group'])
except ObjectDoesNotExist:
return JsonResponse({'error': 'Group does not exist or has already been deleted!'}, status=400)
if group.creator.id == request.user.id:
group.delete()
return JsonResponse({'success': 'Group is deleted successfully!'}, status=202)
else:
group.member.remove(request.user)
group.save()
return JsonResponse({'success': 'Group is withdrawn successfully!'}, status=202)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def delete_event(request):
data = JSONParser().parse(request)
try:
event = Event.objects.get(pk=data['id'])
except ObjectDoesNotExist:
return JsonResponse({'error': 'Event does not exist or has already been deleted!'}, status=400)
if event.creator != request.user:
        return JsonResponse({'error': 'You are not the creator of this event!'}, status=403)
event.delete()
return JsonResponse({'success': 'Event is deleted successfully!'}, status=202)
@api_view(["POST"])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def edit_event(request):
try:
event = Event.objects.get(id=request.data['id'])
except ObjectDoesNotExist:
return JsonResponse({'error': 'Event does not exist or has already been deleted!'}, status=400)
if event.creator != request.user:
        return JsonResponse({'error': 'You are not the creator of this event!'}, status=403)
serializer = EventSerializer(event, data=request.data, partial=True)
# print(serializer)
if serializer.is_valid():
serializer.save()
return JsonResponse({'success': 'Event is edited successfully!'}, status=202)
# print(serializer.errors)
return JsonResponse(serializer.errors, status=400)
def error404(request):
return redirect('/error/')
def custom_exception_handler(exc, context):
# Call REST framework's default exception handler first,
# to get the standard error response.
response = exception_handler(exc, context)
# Now add the HTTP status code to the response.
    if response is not None:
        response.data['status_code'] = response.status_code
        if response.status_code == 401:
            return redirect('/login/')
    return response
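# A minimal sketch of how a handler like this is wired up in Django REST
# framework settings (the dotted path below is an assumption; point it at
# wherever custom_exception_handler actually lives):
#
# REST_FRAMEWORK = {
#     'EXCEPTION_HANDLER': 'events.views.custom_exception_handler',
# }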
|
[
"JiawenPeng@outlook.com"
] |
JiawenPeng@outlook.com
|
e66e5b07f2b980fa29cac57dc2a86131d909b972
|
a645dee437b903f34fdcb9539edfc273cd66581f
|
/Class/database.py
|
9d3dddc260bc3b8841dfae02bae5f4122873b11d
|
[
"MIT"
] |
permissive
|
HectorPulido/Simple-python-blog
|
fda8148b7b9cc8f731370d08807d41ea265051ab
|
09bd57afa1616340ea9bb960613c34eddb3dcd19
|
refs/heads/master
| 2020-11-28T03:00:57.580847
| 2020-01-22T20:21:14
| 2020-01-22T20:21:14
| 229,688,143
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
import os
import mysql.connector
class Database:
def __init__(self):
host = os.environ.get("db_host", "10.10.1.54")
user = "root"
password = os.environ.get("db_password", "")
db = "blogtest"
self.con = mysql.connector.connect(
host=host,
user=user,
passwd=password,
database=db
)
def insert(self, query, values):
mycursor = self.con.cursor()
mycursor.execute(query, values)
self.con.commit()
    def select(self, query, values):
        mycursor = self.con.cursor()
        mycursor.execute(query, values)
        # Map each row to a dict keyed by column name.
        columns = [column[0] for column in mycursor.description]
        return [dict(zip(columns, row)) for row in mycursor.fetchall()]
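# A minimal usage sketch (table and column names are assumptions):
#   db = Database()
#   db.insert("INSERT INTO posts (title) VALUES (%s)", ("Hello",))
#   rows = db.select("SELECT id, title FROM posts WHERE id = %s", (1,))
#   # rows -> [{'id': 1, 'title': 'Hello'}]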
|
[
"hector.pulido@red5g.co"
] |
hector.pulido@red5g.co
|
cc429011abb682a23be907ab306190a8d6757291
|
05d81c8afdbac6086b4285ab2b40d62ab332ff8b
|
/pythontuts/abstract_bc.py
|
ebaefa5667acb3d9463180c08824686ae6d9ae17
|
[] |
no_license
|
Jay952/Basic_python_programming
|
ba39abea5a924771b38dd23e3b8d25b4a20eeca0
|
13ca5609a6968076befc58ade70286dbcb8e59b9
|
refs/heads/master
| 2021-03-04T14:28:58.818920
| 2020-10-20T07:52:27
| 2020-10-20T07:52:27
| 246,041,923
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
from abc import ABCMeta, abstractmethod
class shape(metaclass=ABCMeta):
@abstractmethod
def printArea(self):
return 0
class Rectangle(shape):
type="Rectangle"
sides=4
def __init__(self):
self.length=6
self.breadth=4
def printArea(self):
return self.length*self.breadth
rect1 = Rectangle()
print(rect1.printArea())
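# A quick demonstration of the abstract-class guarantee: shape cannot be
# instantiated directly because printArea is an @abstractmethod.
try:
    shape()
except TypeError as err:
    print(err)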
|
[
"[jaydeep4420@gmail.com]"
] |
[jaydeep4420@gmail.com]
|
1fd326c668976a25f4d7ec9f323b8131d00ae05c
|
bee96035e18ce20b30a544861183bc304d80a044
|
/prepwork/data_transform.py
|
1c83539dea12ff14d01a44711b74fec3d6fc539f
|
[] |
no_license
|
mpucci92/Logma
|
53239f675a4b02016aca459c5169e9d5bdbe03e0
|
57dccb090110e4653fd3986ac7816a67cd0f172e
|
refs/heads/master
| 2020-04-29T09:57:02.154614
| 2019-03-17T01:48:59
| 2019-03-17T01:48:59
| 176,044,486
| 0
| 0
| null | 2019-03-17T01:51:02
| 2019-03-17T01:51:02
| null |
UTF-8
|
Python
| false
| false
| 3,027
|
py
|
import pandas as pd
import numpy as np
import sys, os
import matplotlib.pyplot as plt
import seaborn as sns
from argparse import ArgumentParser
from consts import dir_
n_periods = 60
n_candles = 480
def filter_weekends(df):
df['Datetime'] = pd.to_datetime(df.Datetime)
    df['DayName'] = df.Datetime.dt.day_name()  # dt.weekday_name was removed in pandas 1.0
df = df[~df.DayName.isin(['Saturday', 'Sunday'])]
return df.drop('DayName', axis=1)
def compute_relative_volume(df, n_periods, n_candles):
    # Align the dataset so it starts at the first full day (00:00:00 candle)
idx = df.Datetime.astype(str).str.split(' ', expand=True)[1].values.tolist().index('00:00:00')
df = df.iloc[idx:, :]
cut_off = [i for i in range(0, len(df), n_candles)][-1]
df = df.iloc[:cut_off, :]
cumvol = np.array(np.split(df.Volume.values, int(len(df)/n_candles)))
cumvol = np.cumsum(cumvol, axis=1)
cumvol = cumvol.reshape(-1, )
    offset = n_periods * n_candles
    # For each new day, the expected cumulative-volume profile is the average
    # of the same intraday slot over the previous n_periods days.
    cumvol_final = [0 for i in range(offset)]
    for i in range(offset, len(df), n_candles):
        voldist = cumvol[i - offset:i]
        idc = np.arange(0, offset, n_candles)
        voldist = [voldist[idc + j].mean() for j in range(0, n_candles)]
        cumvol_final += voldist
    # Relative volume = actual cumulative volume / historical average profile.
    df['RelVol'] = np.divide(cumvol, cumvol_final)
return df.iloc[offset:, :]
def features(file, ticker):
df = pd.read_csv(file)
df = filter_weekends(df)
### Moving Averages
df['10MA'] = df.Close.rolling(window=10, min_periods=1).mean()
df['Dist10MA'] = df.Close / df['10MA']
df['20MA'] = df.Close.rolling(window=20, min_periods=1).mean()
df['Dist20MA'] = df.Close / df['20MA']
df['50MA'] = df.Close.rolling(window=50, min_periods=1).mean()
df['Dist50MA'] = df.Close / df['50MA']
df['200MA'] = df.Close.rolling(window=200, min_periods=1).mean()
df['Dist200MA'] = df.Close / df['200MA']
### Cristobands
df['10High'] = df.High.rolling(window=10, min_periods=1).mean()
df['8Low'] = df.Low.rolling(window=10, min_periods=1).mean()
df['CBSpread'] = df['10High'] - df['8Low']
df.drop(['10High', '8Low'], axis=1, inplace=True)
### Center Metrics Around 1
for col in df.columns:
if col in ['Open', 'High', 'Low', 'Close', 'CBSpread']:
df[col] = df[col].pct_change() + 1
## Bollinger Bands
df['20BB'] = df.Close.rolling(window=20, min_periods=1).mean()
df['20BBSTD'] = df['20BB'].rolling(window=20, min_periods=1).std()
df['BB'] = (df['20BB'] + 1.5 * df['20BBSTD']) - (df['20BB'] - 1.5 * df['20BBSTD'])
df['BB'] = (df['BB']+1).pct_change()+1
## Discard Temp Features
df.drop(['20BBSTD', '20BB', '10MA', '20MA', '50MA', '200MA'], axis=1, inplace=True)
## Relative Volume
df = compute_relative_volume(df, n_periods, n_candles)
df.drop('Volume', axis=1, inplace=True)
## NaN Value Check
print(df.isnull().sum(axis=0))
df.to_csv('{}/{}_clean.csv'.format(dir_, ticker))
if __name__ == '__main__':
argparse = ArgumentParser()
argparse.add_argument('file')
argparse.add_argument('ticker')
args = argparse.parse_args()
features(args.file, args.ticker)
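# Example invocation (file path and ticker are assumptions):
#   python data_transform.py data/EURUSD_5min.csv EURUSD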
|
[
"zach.barillaro@gmail.com"
] |
zach.barillaro@gmail.com
|
18c407c5a3c8dc798d1d01274a618e2610d84d82
|
b45b4aa92d3659583ae290437e23e5999c6c2d5a
|
/main.py
|
281896e76ba0586a5f18d522bdf0fed349e3d4f3
|
[] |
no_license
|
GudiedRyan/day_71
|
b67321dc992b8aa2198fe9b0a8e60a2cc37aecab
|
241b3612a8bfdf6f3a1e2a688042eab3619f2ad1
|
refs/heads/main
| 2023-06-21T04:24:21.563035
| 2021-07-16T20:37:30
| 2021-07-16T20:37:30
| 386,758,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,993
|
py
|
import tkinter as tk
import random
# --------------------- CONSTANTS ----------------------#
WORD_BANK = ['green', 'yellow', 'gorilla', 'cheese', 'virtue', 'avatar', 'photo', 'zoo', 'there', 'you', 'are', 'code', 'python', 'love', 'type', 'hope', 'tree', 'sleep', 'rabbit', 'cartoon', 'big', 'stuff', 'ruler', 'everything', 'clock', 'fish', 'war', 'star', 'hive', 'mind', 'force']
FONT = ('Arial', 13)
words_count = 0
current_word = None
# --------------------- FUNCTIONS ----------------------#
def start():
get_word()
window.after(60000, end_game)
def check_word(event):
global current_word
global words_count
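    # The toplevel <space> binding fires after the Entry widget's class
    # binding has inserted the space, so [:-1] strips that trailing space.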
entered_word = word_entry.get()[:-1]
if entered_word == current_word:
words_count += 1
word_entry.delete(0, 'end')
get_word()
else:
print(f"Incorrect:{current_word}!={entered_word}")
word_entry.delete(0, 'end')
def get_word():
global current_word
current_word = random.choice(WORD_BANK)
word_label.config(text=f'{current_word}')
def end_game():
subtitle_label.config(text=f"Your typing speed is {words_count} words per minute.")
# --------------------- UI ----------------------#
window = tk.Tk()
window.title("Typing Speed Test")
window.config(padx=20, pady=20)
# Labels
title_label = tk.Label(text='Typing Speed Tester', font=('Arial', 20))
title_label.grid(column=1, row=0)
subtitle_label = tk.Label(text='Click start to begin, then type the words that appear. Once 60 seconds are up, you will be given your score.', font=FONT)
subtitle_label.grid(column=1, row=1)
type_label = tk.Label(text='Type this word:', font=FONT)
type_label.grid(column=0, row=2)
word_label = tk.Label(text='', font=FONT)
word_label.grid(column=1, row=2)
# Entry
word_entry = tk.Entry(font=FONT)
word_entry.grid(column=1, row=3)
# Button
start_button = tk.Button(text='Start', font=FONT, command=start)
start_button.grid(column=1, row=4)
window.bind("<space>", check_word)
window.mainloop()
|
[
"ryan.guide@comcast.net"
] |
ryan.guide@comcast.net
|
9fcb10f1a0ab474231c1ba2e782dff198d5df0f5
|
6d1e448699801f6ce249223879e2e5d9c8844bf0
|
/lasd_scrapy1/process1.py
|
a7bc7f568aa5ca60ca7fcccb48c94761b34ac65b
|
[] |
no_license
|
daily4u/scraping
|
2a2dea6fb55aa5162535d86b36304e48aa53af3c
|
a76463d5a66e5a492a065d78c8d4f242f443e0ca
|
refs/heads/master
| 2021-01-02T22:33:52.587383
| 2017-08-04T14:22:07
| 2017-08-04T14:22:07
| 99,340,477
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
import os
processname = 'python lasd1.py'
tmp = os.popen("ps -Af").read()
proccount = tmp.count(processname)
if proccount > 0:
print(proccount, ' processes running of ', processname, 'type')
else:
os.system(processname)
|
[
"bossdavid88888@gmail.com"
] |
bossdavid88888@gmail.com
|
a683c5dd2f5af5e9c3b4eceff9af48fdfb87231d
|
c254a8636fc9ad1d785b41008c7f12e99e34dacb
|
/plugwise/connections/serial.py
|
1726d5bf462b97d729c137031900223b5747312a
|
[
"MIT"
] |
permissive
|
grahamwhaley/python-plugwise
|
e937e3c305e65f523c5207d39b6ff0b4caf0adf3
|
1f885302f9c79eb7681f17d46b0a7b05e668a710
|
refs/heads/master
| 2022-10-15T16:48:14.034038
| 2020-06-08T20:03:52
| 2020-06-08T20:03:52
| 271,030,078
| 0
| 0
|
MIT
| 2020-06-09T14:47:22
| 2020-06-09T14:47:21
| null |
UTF-8
|
Python
| false
| false
| 3,952
|
py
|
"""
Use of this source code is governed by the MIT license found in the LICENSE file.
Serial USB connection
"""
import time
import threading
import logging
from queue import Queue
import serial
import serial.threaded
from plugwise.constants import (
BAUD_RATE,
BYTE_SIZE,
PARITY,
SLEEP_TIME,
STOPBITS,
)
from plugwise.connections.connection import StickConnection
from plugwise.exceptions import PortError
from plugwise.message import PlugwiseMessage
from plugwise.util import PlugwiseException
class Protocol(serial.threaded.Protocol):
"""Serial protocol."""
def data_received(self, data):
# pylint: disable-msg=E1101
self.parser(data)
class PlugwiseUSBConnection(StickConnection):
"""simple wrapper around serial module"""
def __init__(self, port, stick=None):
self.port = port
self.baud = BAUD_RATE
self.bits = BYTE_SIZE
self.stop = STOPBITS
self.parity = serial.PARITY_NONE
self.stick = stick
self.run_writer_thread = True
self.run_reader_thread = True
self._is_connected = False
def open_port(self):
"""Open serial port"""
self.stick.logger.debug("Open serial port")
try:
self.serial = serial.Serial(
port = self.port,
baudrate = self.baud,
bytesize = self.bits,
parity = self.parity,
stopbits = self.stop,
)
self._reader_thread = serial.threaded.ReaderThread(self.serial, Protocol)
self._reader_thread.start()
self._reader_thread.protocol.parser = self.feed_parser
self._reader_thread.connect()
except serial.serialutil.SerialException as err:
self.stick.logger.debug(
"Failed to connect to port %s, %s",
self.port,
err,
)
raise PortError(err)
else:
self.stick.logger.debug("Successfully connected to serial port %s", self.port)
self._write_queue = Queue()
self._writer_thread = threading.Thread(None, self.writer_loop,
"write_packets_process", (), {})
self._writer_thread.daemon = True
self._writer_thread.start()
self.stick.logger.debug("Successfully connected to port %s", self.port)
self._is_connected = True
def close_port(self):
"""Close serial port."""
self._is_connected = False
self.run_writer_thread = False
try:
self._reader_thread.close()
except serial.serialutil.SerialException:
self.stick.logger.error("Error while closing device")
raise PlugwiseException("Error while closing device")
    def read_thread_alive(self):
        """Return state of reader thread"""
        return self._reader_thread.is_alive()
    def write_thread_alive(self):
        """Return state of write thread"""
        return self._writer_thread.is_alive()
def is_connected(self):
"""Return connection state"""
return self._is_connected
def feed_parser(self, data):
"""Parse received message."""
assert isinstance(data, bytes)
self.stick.feed_parser(data)
def send(self, message, callback=None):
"""Add message to write queue."""
assert isinstance(message, PlugwiseMessage)
self._write_queue.put_nowait((message, callback))
def writer_loop(self):
"""Write thread."""
while self.run_writer_thread:
(message, callback) = self._write_queue.get(block=True)
self.stick.logger.debug("Sending %s to plugwise stick (%s)", message.__class__.__name__, message.serialize())
self._reader_thread.write(message.serialize())
time.sleep(SLEEP_TIME)
if callback:
callback()
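        # Note: queue.get(block=True) above only lets this loop re-check
        # run_writer_thread after another message arrives, so close_port()
        # alone will not wake the thread. A hedged sketch of one fix (an
        # assumed extension, not part of the original API): have close_port()
        # put a None sentinel on the queue and break here when it is seen.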
|
[
"frank_van_breugel@hotmail.com"
] |
frank_van_breugel@hotmail.com
|
2d684ce62f91df297cd9025742157492e9f7b15e
|
0f99fa25ceb6a026f62ee0f8c99bf029bc24657e
|
/parameters.py
|
ac864d97fa6bd2dfe6249a747bfdd86791814cc1
|
[
"MIT"
] |
permissive
|
betatim/flavours-of-physics
|
0e382cf880ab29e2754a3a4dbe8499e377f31bac
|
b8db8cb5905643ed9cd5b44f65a6e051e592494c
|
refs/heads/master
| 2020-04-30T02:25:59.820261
| 2015-10-22T09:22:11
| 2015-10-22T09:22:11
| 45,092,752
| 0
| 0
| null | 2015-10-28T06:13:20
| 2015-10-28T06:13:20
| null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# https://github.com/gramolin/flavours-of-physics
# Random seed:
random_state = 1
# Weight for the first booster:
w1 = 0.78
# Numbers of trees:
num_trees1 = 200 # Booster 1
num_trees2 = 100 # Booster 2
# Parameters of the first booster:
params1 = {'objective': 'binary:logistic',
'eta': 0.05,
'max_depth': 4,
'scale_pos_weight': 5.,
'silent': 1,
'seed': random_state}
# Parameters of the second booster:
params2 = {'objective': 'binary:logistic',
'eta': 0.05,
'max_depth': 4,
'scale_pos_weight': 5.,
'silent': 1,
"seed": random_state}
|
[
"gramolin@gmail.com"
] |
gramolin@gmail.com
|
979864f68c8f51e1001c261521bd88c954863337
|
8c917dc4810e2dddf7d3902146280a67412c65ea
|
/v_7/GDS/shamil_v3/purchase_transportation/transportations.py
|
8ed7b8054bde56377337d809bee89f1ca678875a
|
[] |
no_license
|
musabahmed/baba
|
d0906e03c1bbd222d3950f521533f3874434b993
|
0b997095c260d58b026440967fea3a202bef7efb
|
refs/heads/master
| 2021-10-09T02:37:32.458269
| 2018-12-20T06:00:00
| 2018-12-20T06:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,282
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2011-2012 NCTR (<http://www.nctr.sd>).
#
##############################################################################
import netsvc
import time
from tools.translate import _
from osv import osv, fields
import decimal_precision as dp
class transportation_order(osv.osv):
"""
    To manage the basic transportation concepts and operations"""
def create(self, cr, user, vals, context=None):
"""
This method override the create method to get sequence and update
field name by sequence value.
@param vals: list of record to be process
@return: new created record ID
"""
if ('name' not in vals) or (vals.get('name')=='/'):
vals['name'] = self.pool.get('ir.sequence').get(cr, user, 'transportation.order')
new_id = super(transportation_order, self).create(cr, user, vals, context)
return new_id
TYPE = [
('purchase', 'Purchase transportation'),
]
STATE_SELECTION = [
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('invoice','Invoice'),
('done', 'Done'),
('cancel', 'Cancel'),
]
DELIVERY_SELECTION = [
('air_freight', 'Air Freight'),
('sea_freight', 'Sea Freight'),
('land_freight', 'Land Freight'),
]
_name = 'transportation.order'
_description = "Transportation order"
_columns = {
'name': fields.char('Reference', size=64, required=True, readonly=1, select=True,
help="unique number of the transportations, computed automatically when the transportations order is created"),
'purchase_order_id' : fields.many2one('purchase.order', 'Purchase order',states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'department_id':fields.many2one('hr.department', 'Department',states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'source_location': fields.char('Source location', size=64,select=True,states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'destination_location': fields.char('Destination location', size=64,select=True,states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'transportation_date':fields.date('Transportation Date', required=True, select=True,states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],},
help="Date on which this document has been created."),
'transportation_type': fields.selection(TYPE, 'Transportation type', select=True,states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'description': fields.text('Transportation description' ,states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'delivery_method': fields.selection(DELIVERY_SELECTION, 'Method of dispatch', select=True ,states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],} ),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', help="Pricelist for current supplier",states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'transportation_line_ids':fields.one2many('transportation.order.line', 'transportation_id' , 'Products',states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'transportation_drivers':fields.one2many('transportation.driver', 'transportation_id' , 'Drivers',states={'done':[('readonly',True)]}),
'state': fields.selection(STATE_SELECTION, 'State', readonly=True, help="The state of the transportation.", select=True),
'notes': fields.text('Notes',states={'done':[('readonly',True)]}),
'user': fields.many2one('res.users', 'Responsible',readonly=True,states={'done':[('readonly',True)]}),
'account_vouchers': fields.many2many('account.voucher', 'transportation_voucher', 'transportation_id', 'voucher_id', 'Account voucher',states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'allocation_base':fields.selection([('weight','WEIGHT'),('quantity','QUANTITY'),('space','Space (volume)'),('price','PRICE'),],'Allocation Base',states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'quotes_ids':fields.one2many('transportation.quotes', 'transportation_id' ,'Quotes',states={'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'supplier_chose_reason_delivery':fields.boolean('Good delivery',states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'supplier_chose_reason_quality':fields.boolean('High quality',states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'supplier_chose_reason_price':fields.boolean('Good price',states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'supplier_chose_reason_others': fields.char('Other Reasons', size=256 ,states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],} ),
'partner_id':fields.many2one('res.partner', 'Transporter', states={'done':[('readonly',True)],'invoice':[('readonly',True)],}),
'purpose': fields.selection([('purchase','Purchase'),('stock','Stock'),('other','Other')],'Purpose', required=True ,select=True, states={'confirmed':[('readonly',True)],'done':[('readonly',True)],'invoice':[('readonly',True)],}),
}
_defaults = {
'name': lambda self, cr, uid, context: '/',
'transportation_date': lambda *a: time.strftime('%Y-%m-%d'),
'state': 'draft',
'user': lambda self, cr, uid, context: uid,
'transportation_type':'purchase',
'allocation_base': 'price',
}
def copy(self, cr, uid, id, default={}, context=None):
"""
Override copy function to edit defult value.
@param default: default vals dict
@return: super copy method
"""
seq_obj = self.pool.get('ir.sequence')
default.update({
'state':'draft',
'name': seq_obj.get(cr, uid, 'transportation.order'),
'transportation_date':time.strftime('%Y-%m-%d'),
'transportation_line_ids':[],
})
return super(transportation_order, self).copy(cr, uid, id, default, context)
def get_products(self, cr, uid, ids, purchase_id, context={}):
"""
To read purchase order lines when select a purchase order.
@param purchase_id : purchase order id
@return: True
"""
purchase_obj = self.pool.get('purchase.order').browse(cr, uid, purchase_id)
transportation_product_odj=self.pool.get('transportation.order.line')
transportation = self.pool.get('transportation.order').browse(cr, uid, ids)
if transportation[0].transportation_line_ids != []:
            raise osv.except_osv(_('This transportation already contains products!'), _('To choose a purchase order, delete all the products first.'))
for product in purchase_obj.order_line:
            transportation_product_odj.create(cr, uid, {
                'name': purchase_obj.name + ': ' + (product.name or ''),
                'product_id': product.product_id.id,
                'price_unit': product.price_unit,
                'product_qty': product.product_qty,
                'product_uom': product.product_uom.id,
                'transportation_id': ids[0],
                'description': 'purchase order ' + purchase_obj.name,
                'purchase_line_id': product.id,
                'code_calling': True, })
self.write(cr,uid,ids,{'description':purchase_obj.name})
return True
def load_items(self, cr, uid, ids,purchase_id, context=None):
"""
        To load purchase order lines of the selected purchase order into transportation lines.
@param purchase_id: purchase order id
@return: True
"""
for order in self.browse(cr, uid, ids):
if order.purchase_order_id:
self.get_products(cr, uid, ids,order.purchase_order_id.id, context=context)
return True
def confirmed(self,cr,uid,ids,*args):
"""
Workflow function to change state of Purchase transportation to confirmed.
@return: True
"""
for order in self.browse(cr, uid, ids):
if order.purchase_order_id:
if not order.transportation_line_ids:
raise osv.except_osv(_('Load purchase items first!'), _('Please Load purchase items Purchase Order ..'))
if order.transportation_line_ids:
self.write(cr, uid, ids, {'state':'confirmed'})
else:
raise osv.except_osv(_('No Products !'), _('Please fill the products list first ..'))
return True
def invoice(self, cr, uid, ids, context={}):
"""
        Function to change the state of the purchase transportation to invoice,
        check driver information, calculate the transportation price of each
        purchase line via allocate_purchase_order_line_price(), and write the
        price back to the purchase order.
@return: True
"""
purchase_ids = []
for transportation in self.browse(cr, uid, ids):
if not transportation.quotes_ids:
raise osv.except_osv(_('wrong action!'), _('Sorry no quotes to be invoiced'))
if not transportation.transportation_drivers :
raise osv.except_osv(_('No Driver !'), _('Please add the Drivers first ..'))
amount = 0.0
for quote in transportation.quotes_ids:
if quote.state in ['done']:
quote_obj = quote
#else : raise osv.except_osv(_('wrong action!'), _('Please approve your quotes first'))
purchase_ids.append(transportation.purchase_order_id.id)
transportation._calculate_transportation_amount(quote_obj.amount_total,quote_obj)
self.write(cr, uid, ids, {'state':'invoice'},context=context)
if False not in purchase_ids:
self.allocate_purchase_order_line_price(cr,uid,ids,purchase_ids)
return True
def done(self, cr, uid, ids, context=None):
"""
Workflow function to change state of Purchase transportation to Done
        and create a voucher and voucher lines with the transportation price.
@return: True
"""
self.invoice(cr, uid, ids, context)
transportation_line_obj = self.pool.get('transportation.order.line')
company_obj = self.pool.get('res.users').browse(cr, uid, uid).company_id
journal = company_obj.transportation_jorunal
account = company_obj.transportation_account
if not journal:
            raise osv.except_osv(_('Wrong action!'), _('No transportation journal defined for your company! Please add the journal first.'))
voucher_obj = self.pool.get('account.voucher')
voucher_line_obj = self.pool.get('account.voucher.line')
transportation_obj = self.pool.get('transportation.order').browse(cr,uid,ids)
transportation_voucher = []
purchase_ids = []
for transportation in transportation_obj:
amount = 0.0
purchase = ''
if transportation.purpose == 'purchase':
purchase = transportation.purchase_order_id.name
if transportation.purchase_order_id.purchase_type == 'foreign':
account = company_obj.purchase_foreign_account
                if transportation.purchase_order_id.contract_id:
                    if not transportation.purchase_order_id.contract_id.contract_account:
                        raise osv.except_osv(_('Missing Account Number!'), _('There is no account defined for this contract, please choose the account first.'))
                    account = transportation.purchase_order_id.contract_id.contract_account
for quote in transportation.quotes_ids:
if quote.state in ['done']:
quote_obj = quote
if not account:
raise osv.except_osv(_('wrong action!'), _('no Transportation Account defined! please add the account first ..'))
voucher_id = voucher_obj.create(cr, uid, {
'amount': quote_obj.amount_total ,
'type': 'purchase',
'date': time.strftime('%Y-%m-%d'),
'partner_id': quote_obj.supplier_id.id,
'account_id': quote_obj.supplier_id.property_account_payable.id,
'journal_id': journal.id,
'reference': transportation.name + purchase,})
vocher_line_id = voucher_line_obj.create(cr, uid, {
'amount': quote_obj.amount_total ,
'voucher_id': voucher_id,
'type': 'dr',
'account_id': account.id,
'name': transportation.description or transportation.name ,
})
transportation_voucher.append(voucher_id)
purchase_ids.append(transportation.purchase_order_id.id)
self.write(cr, uid, ids, {'state':'done','account_vouchers':[(6,0,transportation_voucher)]})
return True
def allocate_purchase_order_line_price(self, cr, uid,ids,purchase_ids):
"""
        Calculate the transportation price for every purchase line and write the price to purchase order lines.
@param purchase_ids: list of purchase orders ids
@return: Ture
"""
purchase_line_obj = self.pool.get('purchase.order.line')
transportation_product_obj = self.pool.get('transportation.order.line')
for purchase in self.pool.get('purchase.order').browse(cr, uid, purchase_ids):
for line in purchase.order_line:
transportation_item = transportation_product_obj.search(cr,uid,[('product_id','=',line.product_id.id),('transportation_id','in',[ids])])
if transportation_item :
transportation_product = transportation_product_obj.browse(cr, uid, transportation_item[0])
amount = transportation_product.transportation_price_unit
total = line.extra_price_total+amount
if line.transportation_price:
amount += line.transportation_price
purchase_line_obj.write(cr,uid,line.id,{'transportation_price': amount,'extra_price_total':total})
return True
def cancel(self,cr,uid,ids,notes=''):
"""
        Workflow function to change the transportation state to cancel and write notes.
@param notes : contains information.
@return: True
"""
notes = ""
user = self.pool.get('res.users').browse(cr, uid,uid).name
notes = notes +'\n'+'purchase Transportation Cancelled at : '+time.strftime('%Y-%m-%d') + ' by '+ user
self.write(cr, uid, ids, {'state':'cancel','notes':notes})
return True
def ir_action_cancel_draft(self, cr, uid, ids, *args):
"""
        To change the transportation state to draft and reset the workflow.
@return: True
"""
if not len(ids):
return False
wf_service = netsvc.LocalService("workflow")
for s_id in ids:
self.write(cr, uid, s_id, {'state':'draft'})
wf_service.trg_delete(uid, 'transportation.order', s_id, cr)
wf_service.trg_create(uid, 'transportation.order', s_id, cr)
return True
def partner_id_change(self, cr, uid, ids,partner):
"""
On change partner function to read partner pricelist.
@param partner: partner id.
@return: Dictonary of partner's pricelist value
"""
partne = self.pool.get('res.partner.address').search(cr, uid, [('partner_id', '=', partner)])
if partne:
prod= self.pool.get('res.partner.address').browse(cr, uid,partne[0])
return {'value': {'pricelist_id':prod.partner_id.property_product_pricelist_purchase.id }}
def create_quote(self, cr, uid, ids, context=None):
"""
Button function to creates qoutation
@return: True
"""
for obj in self.browse(cr, uid, ids):
if obj.transportation_line_ids:
pq_id = self.pool.get('transportation.quotes').create(cr, uid, {'transportation_id': obj.id,}, context=context)
for product in obj.transportation_line_ids:
prod_name = ''
if product.product_id.id :
prod_name = self.pool.get('product.product').browse(cr, uid,product.product_id.id, context=context).name
if product.name:
prod_name = product.name
q_id = self.pool.get('transportation.quotes.products').create(cr, uid, {
'name':prod_name,
'price_unit': 0.0,
'price_unit_tax': 0.0,
'price_unit_total': 0.0,
'product_id': product.product_id.id or False,
'product_qty': product.product_qty,
'quote_id':pq_id,
'description': product.description,
'transportation_line': product.id,
})
else:
                raise osv.except_osv(_('No Products!'), _('Please fill the product list first.'))
return True
def _calculate_transportation_amount(self, cr, uid, ids, quote_amount,quote):
"""
        To calculate the transportation amount for every transportation line
        according to the allocation base; the default allocation is by price.
@return: True
"""
quote_product = self.pool.get('transportation.quotes.products')
for transportation in self.browse(cr, uid, ids):
total_qty = total_weight = total_space = 0.0
            # calculate the totals of quantity, weight and space
for item in transportation.transportation_line_ids:
total_qty += item.product_qty
if transportation.allocation_base in ['weight']:
if item.weight:
total_weight += item.weight
else:
raise osv.except_osv(_('No Product weight!'), _('Please fill the product weight first ..'))
if transportation.allocation_base in ['space']:
if item.space :
total_space += item.space
else:
raise osv.except_osv(_('No Product Space (volume) !'), _('Please fill the product Space (volume) first ..'))
for item in transportation.transportation_line_ids:
# get the line Id to get the price of the item
line = quote_product.search(cr, uid,[('quote_id','=',quote.id), ('product_id','=',item.product_id.id),('product_qty','=',item.product_qty)])[0]
line_obj = quote_product.browse(cr, uid, line)
                # allocate the price to the item based on the allocation base
                if transportation.allocation_base in ['quantity']:
                    amount = quote_amount * (item.product_qty / total_qty)
                elif transportation.allocation_base in ['weight']:
                    amount = quote_amount * (item.weight / total_weight)
                elif transportation.allocation_base in ['space']:
                    amount = quote_amount * (item.space / total_space)
                else:
                    amount = line_obj.price_unit
item.write({'transportation_price_unit':amount})
return True
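    # Worked example (hypothetical numbers): with allocation_base='quantity',
    # quote_amount=900 and two lines of qty 10 and 20 (total 30), the lines
    # receive 900*(10/30)=300 and 900*(20/30)=600 respectively.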
def purchase_ref(self, cr, uid, ids, purchase_ref, context=None):
        # Load the purchase order lines when a purchase reference is selected
if purchase_ref:
if ids == []:
raise osv.except_osv(_('The Transportation must be saved first!'), _('please save the Transportation before selecting the Purchase Order ..'))
else:
self.get_products(cr, uid, ids,purchase_ref)
return {}
def onchange_purpose(self, cr, uid, ids, purpose ,context=None):
"""
On change purpose function to change delivery Method
@param purpose: purpose
@return: Dictionary
"""
if purpose and ids:
unlink_ids = self.pool.get('transportation.order.line').search(cr, uid,[('transportation_id','=',ids[0])] )
self.pool.get('transportation.order.line').unlink(cr, uid, unlink_ids, context=context)
land = 'land_freight'
if purpose != 'purchase' :
return {'value': { 'delivery_method' : land }}
return {}
class transportation_order_line(osv.osv):
"""
    To manage transportation order lines """
_name = 'transportation.order.line'
_description = "Transportation order line"
_columns = {
'name': fields.char('Description', size=256, required=True,readonly=False),
'product_qty': fields.float('Quantity', required=True, digits=(16,2)),
'product_uom': fields.many2one('product.uom', 'Product UOM',readonly=False),
'product_id': fields.many2one('product.product', 'Product', required=True),
'transportation_id': fields.many2one('transportation.order', 'Transportation',),
'transportation_price_unit': fields.float('Transportation price',readonly=True, digits=(16,2)),
'product_packaging': fields.many2one('product.packaging', 'Packaging', help="Control the packages of the products"),
'description': fields.text('Specification' ,readonly=True),
'weight': fields.float('Weight',readonly=False, digits=(16, 2)),
'space': fields.float('Space (volume)',readonly=True, digits=(16, 2)),
'notes': fields.text('Notes'),
'purchase_line_id': fields.many2one('purchase.order.line','order_line'),
'price_unit': fields.float('Purchase Price Unite',readonly=True, digits=(16,2)),
}
_defaults = {
'name': lambda self, cr, uid, context: '/',
}
_sql_constraints = [
        ('produc_uniq', 'unique(product_id,transportation_id)', 'Sorry, you entered the same product twice; duplicates are not allowed!'),
        ('check_product_quantity', 'CHECK ( product_qty > 0 )', "Sorry, product quantity must be greater than zero."),
]
def create(self, cr, uid, vals, context=None):
        purpose = vals and vals.get('transportation_id') and self.pool.get('transportation.order').browse(cr, uid, vals['transportation_id']).purpose
        if purpose == 'purchase' and (('code_calling' not in vals) or not vals['code_calling']):
            raise osv.except_osv(_('Sorry, cannot add new items'), _('The purpose is purchase.'))
return super(transportation_order_line, self).create(cr, uid, vals, context)
def product_id_change(self, cr, uid, ids,product):
"""
        On change product function to read the default name and UOM of the product
@param product: product_id
@return: Dictionary of product name and uom or empty dictionary
"""
if product:
prod= self.pool.get('product.product').browse(cr, uid,product)
return {'value': { 'name':prod.name,'product_uom':prod.uom_po_id.id,'product_qty': 1.0}}
return {}
def qty_change(self, cr, uid, ids, product_qty, context=None):
"""
        To check that the transportation quantity does not exceed the purchase order quantity.
@return: True
"""
for product in self.browse(cr, uid, ids):
purchase_id = product.transportation_id.purchase_order_id
if purchase_id.order_line:
for line in purchase_id.order_line:
if product.product_id == line.product_id:
if product_qty > line.product_qty :
                            raise osv.except_osv(_('Wrong action!'), _('This quantity is more than the purchase order quantity.'))
return True
class transportation_driver(osv.osv):
"""
    To manage transportation drivers """
_name = 'transportation.driver'
_description = "Transportation Driver"
_columns = {
'name': fields.char('Reference', size=64, required=True, readonly=1, select=True),
'driver_name': fields.char('Driver Name', size=64, required=True,),
'phone_number': fields.integer('Phone Number'),
'car_type': fields.char('Car Type', size=64, required=True, select=True),
'car_number': fields.char('Car Number', size=64, required=True, select=True),
'transportation_id': fields.many2one('transportation.order', 'Transportation ref',),
'description': fields.text('Description'),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'transportation.driver')
}
_sql_constraints = [
#('tran_driver_uniq', 'unique(transportation_id,car_number)', 'Sorry You Entered The Same Car Tow Times for this transportaion order!'),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"bakry@exp-sa.com"
] |
bakry@exp-sa.com
|
4330f8a424c768e77c4d469a0a856cde88cf4ba4
|
702d0994a15b4e215a5563c0c54393da2ed7e99e
|
/thehanger/shop/migrations/0003_auto_20211018_2306.py
|
607a87b7090827d6f89d591e393ea94baddc4299
|
[] |
no_license
|
lengoc12/DAN
|
2dfa00c4bda5f02f6bfd2dbb84f0cc624b58d380
|
1440229130f03b9e5671da83f45b61205758920c
|
refs/heads/master
| 2023-08-23T23:45:39.064121
| 2021-10-27T16:27:13
| 2021-10-27T16:27:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
# Generated by Django 3.2.7 on 2021-10-18 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_order_payment_method'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='status',
),
migrations.AddField(
model_name='category',
name='image',
field=models.ImageField(default=None, upload_to='category/'),
),
]
|
[
"52289366+lengoc12@users.noreply.github.com"
] |
52289366+lengoc12@users.noreply.github.com
|
d5cbcca27e7788245d9504386ab9666887d65a94
|
52fe45a03eb4545094adfdff48c439843972d2a8
|
/utilities/to_telugu_equal_csv.py
|
52759a7e5104a05ce597707a523da8bc22f448c6
|
[] |
no_license
|
saibharani/Music_Generation
|
d49eb8d527651e530f509710d3a03dd566d9b657
|
5bc409f9cdc33464f80468bac1c8c75f07a45895
|
refs/heads/master
| 2020-03-25T10:17:39.217790
| 2018-07-12T17:04:08
| 2018-07-12T17:04:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
import xlwt
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet("Sheet 1")
sheet1.write(0, 1, "text")
with open('transliterated.txt') as fobj:
    txt = fobj.read()
txt_list = txt.split(" ")
txt_len = len(txt_list)
req_len = txt_len // 65000
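# Splitting the text into roughly 65,000 cells keeps the sheet under the
# 65,536-row limit of the legacy .xls format (presumably the intent here).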
counter = 0
i = 1
str_txt = ""
for word in txt_list:
if (word.strip() != ""):
str_txt += word
str_txt += " "
counter+=1
if (counter == req_len):
counter = 0
sheet1.write(i,1,str_txt)
str_txt = ""
i += 1
book.save("telugu_equal.xls")
|
[
"amatya.avadhanula@gmail.com"
] |
amatya.avadhanula@gmail.com
|
52140a927a1c45b88b007bd1af0bfe4d2d942003
|
50402cc4388dfee3a9dbe9e121ef217759ebdba8
|
/django_wk/Mikesite/pubApp/pubmanager.py
|
fec872afed9ea1d9834c4cc18521c7088d2b2c74
|
[] |
no_license
|
dqyi11/SVNBackup
|
bd46a69ec55e3a4f981a9bca4c8340944d8d5886
|
9ad38e38453ef8539011cf4d9a9c0a363e668759
|
refs/heads/master
| 2020-03-26T12:15:01.155873
| 2015-12-10T01:11:36
| 2015-12-10T01:11:36
| 144,883,382
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,134
|
py
|
'''
Created on 2013-12-29
@author: Walter
'''
from pubApp.models import Paper
class PubManager(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
self.type = "all"
self.year = "all"
self.type_all = "all"
self.type_article = "article"
self.type_proceeding = "inproceedings"
self.year_all = "all"
self.typeIdx = 0
def getYearList(self):
years = []
        publications = Paper.objects.all()
for p in publications:
if not (str(p.year) in years):
years.append(str(p.year))
years.sort(reverse=True)
return years
def getPubList(self):
papers = []
if self.type == "all" and self.year =="all":
papers = Paper.objects.all().order_by('-year','title')
elif self.type == "all":
papers = Paper.objects.filter(year=self.year).order_by('-year','title')
elif self.year == "all":
papers = Paper.objects.filter(type=self.type).order_by('-year','title')
else:
papers = Paper.objects.filter(type=self.type).filter(year=self.year).order_by('-year','title')
self.typeIdx = self.getTypeIndex()
pub_years = []
for p in papers:
ys = [y for y in pub_years if p.year==y[0]]
if len(ys) == 0:
pub_years.append((p.year, []))
for p in papers:
year = next(y for y in pub_years if p.year==y[0])
year[1].append(p)
return pub_years
def getTypeIndex(self):
if self.type == self.type_article:
return 1
if self.type == self.type_proceeding:
return 2
return 0
def getYearIndex(self):
if self.year == self.year_all:
return 0
return int(self.year)
|
[
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
] |
walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39
|
4b4586f98d333cae5f2725cd0eefb642230b844a
|
7739c30c4d0260c0cb2db9606620009434e5d362
|
/usrobj_src/pyembroideryGH_AddStitchblock.py
|
dca66a7ca7f299b52d7325456eda52e50d368e26
|
[
"MIT"
] |
permissive
|
fstwn/pyembroideryGH
|
1dc82c4fd7c071ea234434fe433f92932fb20e46
|
ee7fd47316b364962a69976b4168ce9fef2702cf
|
refs/heads/master
| 2021-12-01T06:17:41.716433
| 2021-11-29T10:32:32
| 2021-11-29T10:32:32
| 232,354,805
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,217
|
py
|
"""
Adds one or many StitchBlocks to an embroidery pattern supplied as
pyembroidery.EmbPattern instance
Inputs:
Pattern: The pattern to be modified as pyembroidery.EmbPattern
instance.
{item, EmbPattern}
StitchBlock: The stitchblock(s) to add to the pattern.
{list, StitchBlock}
Output:
Pattern: The modified pattern with the newly added stitchblock(s).
{item/list/tree, EmbPattern}
Remarks:
Author: Max Eschenbach
License: MIT License
Version: 201030
"""
# PYTHON STANDARD LIBRARY IMPORTS
from __future__ import division
# GHPYTHON SDK IMPORTS
from ghpythonlib.componentbase import executingcomponent as component
import Grasshopper, GhPython
import System
import Rhino
import rhinoscriptsyntax as rs
# GHENV COMPONENT SETTINGS
ghenv.Component.Name = "AddStitchBlock"
ghenv.Component.NickName = "ASB"
ghenv.Component.Category = "pyembroideryGH"
ghenv.Component.SubCategory = "3 Pattern Creation"
# LOCAL MODULE IMPORTS
try:
import pyembroidery
except ImportError:
errMsg = ("The pyembroidery python module seems to be not correctly " +
"installed! Please make sure the module is in you search " +
"path, see README for instructions!.")
raise ImportError(errMsg)
class StitchBlock(object):
def __init__(self, stitches, thread):
self._set_stitches(stitches)
self._set_thread(thread)
def __getitem__(self, item):
return (self.stitches, self.thread)[item]
def get_stitches_iter(self):
for s in self._stitches:
yield s
def _get_stitches(self):
return self._stitches
def _set_stitches(self, stitches):
if isinstance(stitches, list):
self._stitches = stitches
elif isinstance(stitches, tuple):
self._stitches = list(stitches)
else:
raise ValueError("Supplied data for stitches is not a valid list " +
"of stitches!")
stitches = property(_get_stitches, _set_stitches, None,
"The stitches of this StitchBlock")
def _get_thread(self):
return self._thread
def _set_thread(self, thread):
if isinstance(thread, pyembroidery.EmbThread):
self._thread = thread
else:
raise ValueError("Supplied thread is not a valid EmbThread " +
"instance!")
thread = property(_get_thread, _set_thread, None,
"The thread of this StitchBlock")
def ToString(self):
descr = "StitchBlock ({} Stitches, EmbThread {})"
color = self.thread.hex_color()
descr = descr.format(len(self.stitches), color)
return descr
class AddStitchBlock(component):
def RunScript(self, pattern_in, stitchblock):
# initialize outputs
Pattern = Grasshopper.DataTree[object]()
if pattern_in is not None and stitchblock:
# copy the input pattern to avoid modification on the original object
if isinstance(pattern_in, pyembroidery.EmbPattern):
pattern_in = pattern_in.copy()
else:
raise TypeError("Supplied pattern is no valid " +
"pyembroidery.EmbPattern instance! " +
"Please check your inputs and try again.")
# loop over all stitchblocks and add to pattern
            for sb in stitchblock:
                pattern_in.add_stitchblock(sb)
# add pattern to output tree
Pattern.Add(pattern_in)
else:
rml = self.RuntimeMessageLevel.Warning
if pattern_in is None:
errMsg = ("Input Pattern failed to collect data!")
self.AddRuntimeMessage(rml, errMsg)
if not stitchblock:
errMsg = ("Input StitchBlock failed to collect data!")
self.AddRuntimeMessage(rml, errMsg)
# return outputs if you have them; here I try it for you:
return Pattern
|
[
"post@maxeschenbach.com"
] |
post@maxeschenbach.com
|
b7273a3830ebaeae6f415ae4458105f916fb744c
|
2230b5e0f2ee3733ecab6e76957e8ed9b1272d84
|
/tests/mix/nicib/tc_nicib.py
|
7b42e42e8fc3cbf30934c8fa752a560336d4e6f2
|
[] |
no_license
|
AlertBear/fcior
|
e9f26e6775ae41d1539f128f0dd212bdaa3080dd
|
f101c9d330be0858578e74631746870190002788
|
refs/heads/master
| 2020-05-19T12:36:46.214449
| 2015-10-23T02:24:13
| 2015-10-23T02:24:13
| 29,911,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,835
|
py
|
#!/usr/bin/python2.7
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
#
import ctiutils
import time
import os
import traceback
from basic import *
from common import *
# test purposes
from tp_nicib_001 import tp_nicib_001
from tp_nicib_002 import tp_nicib_002
def startup():
info_print_report("FC-IOR mix nicib: startup")
# get the root domain and io domain from configuration file
nprd1_name = ctiutils.cti_getvar("NPRD_A")
nprd1_password = ctiutils.cti_getvar("NPRD_A_PASSWORD")
nprd2_name = ctiutils.cti_getvar("NPRD_B")
nprd2_password = ctiutils.cti_getvar("NPRD_B_PASSWORD")
iod_name = ctiutils.cti_getvar("IOD")
iod_password = ctiutils.cti_getvar("IOD_PASSWORD")
# get the nic and ib pfs related variables in configuration file
nic_pf_1 = ctiutils.cti_getvar("NIC_PF_A")
nic_pf_2 = ctiutils.cti_getvar("NIC_PF_B")
ib_pf_1 = ctiutils.cti_getvar("IB_PF_A")
ib_pf_2 = ctiutils.cti_getvar("IB_PF_B")
subnet = ctiutils.cti_getvar("VF_SUBNET")
# get remote host variables in configuration file
remote_nic_link = ctiutils.cti_getvar("REMOTE_NIC_LINK")
nic_remote_host = ctiutils.cti_getvar("NIC_REMOTE_HOST")
nic_remote_password = ctiutils.cti_getvar("NIC_REMOTE_HOST_PASSWORD")
remote_ib_link = ctiutils.cti_getvar("REMOTE_IB_LINK")
ib_remote_host = ctiutils.cti_getvar("IB_REMOTE_HOST")
ib_remote_password = ctiutils.cti_getvar("IB_REMOTE_HOST_PASSWORD")
# check the mix_test flag has been set
mix_test_flag = ctiutils.cti_getvar("MIX_TEST")
nic_switch_connection = ctiutils.cti_getvar("NIC_SWITCH_CONNECTION")
ib_switch_connection = ctiutils.cti_getvar("IB_SWITCH_CONNECTION")
if mix_test_flag != "yes":
error_print_report("If want to test mix_nicfc case, be ensure to "
"define MIX_TEST=yes in test_config file")
ctiutils.cti_deleteall("Not supported")
return 1
if nic_switch_connection != "yes" or ib_switch_connection != "yes":
error_print_report("If want to test mix_nicib case, be ensure to "
"connect all NIC and IB cards to the related "
"switches. Meanwhile, define "
"NIC_SWITCH_CONNECTION=yes, "
"IB_SWITCH_CONNECTION=yes in test_config file")
ctiutils.cti_deleteall("Not supported")
return 1
# check whether remote nic pf has been connected to switch and
# could be pingable from local test system
info_print_report("Checking all nic pfs have been "
"connected to switch")
remote_host_dict = {nic_remote_host: {"password": nic_remote_password,
"link": remote_nic_link}}
root_dict = {nprd1_name: {"password": nprd1_password,
"pf": nic_pf_1},
nprd2_name: {"password": nprd2_password,
"pf": nic_pf_2}}
try:
check_nic_pf_be_connected(remote_host_dict, root_dict)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Not all nic pfs are connected to switch")
return 1
else:
info_print_report("All nic pfs are connected to switch")
# check whether remote ib pf has been connected to switch and
# could be pingable from local test system
info_print_report("Checking all ib pfs have been "
"connected to switch")
ib_remote_host_dict = {ib_remote_host: {"password": ib_remote_password,
"link": remote_ib_link}}
root_dict = {nprd1_name: {"password": nprd1_password,
"pf": ib_pf_1},
nprd2_name: {"password": nprd2_password,
"pf": ib_pf_2}}
try:
check_ib_pf_be_connected(ib_remote_host_dict, root_dict)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Not all IB pfs are connected to switch")
return 1
else:
info_print_report("All IB pfs are connected to switch")
# check ib ior has been enabled in io domain
info_print_report("Checking ib ior has been enabled in [%s]" % iod_name)
try:
check_ib_ior_enabled_in_iod(iod_name, iod_password)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("IB ior has not been enabled in "
"[%s]" % iod_name)
return 1
else:
info_print_report("IB ior has been enabled in [%s]" % iod_name)
# check whether each nic pf has created vfs; if so, destroy them
nic_pf_list = [nic_pf_1, nic_pf_2]
for pf_item in nic_pf_list:
info_print_report("Checking PF [%s] whether has created vf" % pf_item)
if check_whether_pf_has_created_vf(pf_item):
info_print_report(
"VF has been created on PF [%s], trying to destroy..." %
pf_item)
try:
destroy_all_nic_vfs_on_pf(iod_name, iod_password, pf_item)
except Exception as e:
error_print_report(e)
error_report(traceback.print_exc())
ctiutils.cti_deleteall(
"Failed to destroy all the vfs created on the PF [%s]" %
pf_item)
return 1
else:
info_print_report(
"Destroy all the vfs created on PF [%s]" % pf_item)
else:
info_print_report(
"No vf has been created on the PF [%s]" % pf_item)
time.sleep(3)
# check whether each ib pf has created vfs; if so, destroy them
ib_pf_list = [ib_pf_1, ib_pf_2]
for pf_item in ib_pf_list:
info_print_report("Checking PF [%s] whether has created vf" % pf_item)
if check_whether_pf_has_created_vf(pf_item):
info_print_report(
"VF has been created on PF [%s], trying to destroy..." %
pf_item)
try:
destroy_all_ib_vfs_on_pf(iod_name, iod_password, pf_item)
except Exception as e:
error_print_report(e)
error_report(traceback.print_exc())
ctiutils.cti_deleteall(
"Failed to destroy all the vfs created on the PF [%s]" %
pf_item)
return 1
else:
info_print_report(
"Destroy all the vfs created on PF [%s]" % pf_item)
else:
info_print_report(
"No vf has been created on the PF [%s]" % pf_item)
time.sleep(3)
# create nic vfs on two pfs
try:
info_print_report("Creating vf on PF [%s]" % nic_pf_1)
nic_a_vf = create_nic_vf(nprd1_name, nprd1_password, nic_pf_1)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to create vf on the PF [%s]" % nic_pf_1)
return 1
else:
info_print_report("Created vf [%s] on pf [%s]" % (nic_a_vf, nic_pf_1))
time.sleep(30)
try:
info_print_report("Creating vf on PF [%s]" % nic_pf_2)
nic_b_vf = create_nic_vf(nprd2_name, nprd2_password, nic_pf_2)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to create vf on the PF [%s]" % nic_pf_2)
return 1
else:
info_print_report("Created vf [%s] on pf [%s]" % (nic_b_vf, nic_pf_2))
time.sleep(30)
# create ib vfs on two pfs
try:
info_print_report("Creating vf on PF [%s]" % ib_pf_1)
ib_a_vf = create_ib_vf(ib_pf_1)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to create vf on the PF [%s]" % ib_pf_1)
return 1
else:
info_print_report("Created vf [%s] on pf [%s]" % (ib_a_vf, ib_pf_1))
time.sleep(30)
try:
info_print_report("Creating vf on PF [%s]" % ib_pf_2)
ib_b_vf = create_ib_vf(ib_pf_2)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to create vf on the PF [%s]" % ib_pf_2)
return 1
else:
info_print_report("Created vf [%s] on pf [%s]" % (ib_b_vf, ib_pf_2))
time.sleep(30)
# allocate vfs to the io domain
vfs_list = [nic_a_vf, nic_b_vf, ib_a_vf, ib_b_vf]
for vf in vfs_list:
try:
info_print_report(
"Allocating vf [%s] to io domain [%s]" % (vf, iod_name))
assign_vf_to_domain(vf, iod_name)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall(
"Failed to assign the vf [%s] to domain [%s] " % (vf, iod_name))
return 1
else:
info_print_report(
"VF [%s] has been allocated to io domain [%s]" % (vf, iod_name))
time.sleep(5)
# reboot io domain
try:
info_print_report(
"Rebooting io domain [%s] after allocated vfs ..." % iod_name)
reboot_domain(iod_name, iod_password)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to reboot io domain [%s]" % iod_name)
return 1
# configure nic vfs ipmp in io domain
info_print_report(
"Configuring the corresponding nic interfaces to be "
"IPMP in io domain [%s]" % iod_name)
ipmp = 'ior_ipmp0'
ip_addr = subnet + '.11.1'
try:
configure_nic_vfs_ipmp_in_domain(
iod_name,
iod_password,
ipmp,
nic_a_vf,
nic_b_vf,
ip_addr)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to configure nic vfs interface"
" to be IPMP io domain [%s]" % iod_name)
return 1
else:
info_print_report("Configured done")
# configure remote nic interface to be pingable from nic ipmp in io domain
info_print_report("Configuring NIC interface in remote host")
rmt_ip_addr = subnet + '.11.2'
try:
configure_nic_ip_in_remote(
nic_remote_host,
nic_remote_password,
remote_nic_link,
rmt_ip_addr)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to configure nic interface "
"on remote host [%s]" % nic_remote_host)
return 1
else:
info_print_report("Configured done")
# check that the remote interface is pingable from the io domain
info_print_report("Checking remote interface is pingable from [%s]" %
iod_name)
try:
check_remote_pingable_from_io_domain(
iod_name,
iod_password,
rmt_ip_addr)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to get remote nic interface be "
"pingable from io domain [%s]" % iod_name)
return 1
else:
info_print_report("Done")
# configure IB vfs ipmp in io domain
info_print_report(
"Configuring the corresponding IB links to be "
"IPMP in io domain [%s]" % iod_name)
ipmp = 'ior_ipmp1'
ib_ip_addr = subnet + '.12.1'
try:
configure_ib_vfs_ipmp_in_domain(
iod_name,
iod_password,
ipmp,
ib_a_vf,
ib_b_vf,
ib_ip_addr)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to configure IB vfs links"
" to be IPMP io domain [%s]" % iod_name)
return 1
else:
info_print_report("Configured done")
# configure remote IB link to be pingable from ib ipmp in io domain
info_print_report("Configuring IB link in remote host")
rmt_ib_ip_addr = subnet + '.12.2'
try:
configure_ib_ip_in_remote(
ib_remote_host,
ib_remote_password,
remote_ib_link,
rmt_ib_ip_addr)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to configure IB link "
"on remote host [%s]" % ib_remote_host)
return 1
else:
info_print_report("Configured done")
# check that the remote ib link is pingable from the io domain
info_print_report("Checking remote ib link is pingable from [%s]" %
iod_name)
try:
check_remote_pingable_from_io_domain(
iod_name,
iod_password,
rmt_ib_ip_addr)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to get remote ib link be pingable "
"from io domain [%s]" % iod_name)
return 1
else:
info_print_report("Done")
# run io traffic on ib ipmp interface, io traffic is just ping.
try:
info_print_report(
"Run traffic between remote interface and "
"ipmp group in io domain [%s]" % iod_name)
run_ping_traffic_in_domain(
iod_name,
iod_password,
ib_ip_addr,
rmt_ib_ip_addr)
except Exception as e:
error_print_report(e)
error_report(traceback.print_exc())
ctiutils.cti_deleteall(
"Failed to run traffic between remote ib link"
" and ib ipmp in io domain [%s]" % iod_name)
return 1
# Get the test vfs info dict
iod_info_dict = {"name": iod_name, "password": iod_password}
nic_pf_1_vfs_dict = {}
nic_pf_2_vfs_dict = {}
nic_pf_1_vfs_dict.update({nic_a_vf: iod_name})
nic_pf_2_vfs_dict.update({nic_b_vf: iod_name})
ib_pf_1_vfs_dict = {}
ib_pf_2_vfs_dict = {}
ib_pf_1_vfs_dict.update({ib_a_vf: iod_name})
ib_pf_2_vfs_dict.update({ib_b_vf: iod_name})
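# Nesting convention, as consumed by get_all_vfs_info below:
# {root domain name: {pf: {vf: io domain name}}}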
all_vfs_dict = {
nprd1_name: {
nic_pf_1: nic_pf_1_vfs_dict,
ib_pf_1: ib_pf_1_vfs_dict
},
nprd2_name: {
nic_pf_2: nic_pf_2_vfs_dict,
ib_pf_2: ib_pf_2_vfs_dict
}
}
try:
info_print_report(
"Getting all vfs information...")
get_all_vfs_info(iod_info_dict, all_vfs_dict)
except Exception as e:
error_print_report(e)
error_report(ctiutils.cti_traceback())
ctiutils.cti_deleteall("Failed to get all vfs information")
return 1
else:
info_print_report("Done")
return 0
def cleanup():
info_print_report("FC-IOR mix nicib: cleanup")
nic_pf_1 = ctiutils.cti_getvar("NIC_PF_A")
nic_pf_2 = ctiutils.cti_getvar("NIC_PF_B")
ib_pf_1 = ctiutils.cti_getvar("IB_PF_A")
ib_pf_2 = ctiutils.cti_getvar("IB_PF_B")
nic_rmt_name = ctiutils.cti_getvar("NIC_REMOTE_HOST")
nic_rmt_password = ctiutils.cti_getvar("NIC_REMOTE_HOST_PASSWORD")
ib_rmt_name = ctiutils.cti_getvar("IB_REMOTE_HOST")
ib_rmt_password = ctiutils.cti_getvar("IB_REMOTE_HOST_PASSWORD")
iod_name = ctiutils.cti_getvar("IOD")
iod_password = ctiutils.cti_getvar('IOD_PASSWORD')
# if nic or ib traffic process is still running, kill it
try:
info_print_report(
"Killing the nic or ib traffic"
" process in io domain [%s]" % iod_name)
kill_nic_ib_traffic_process_in_domain(iod_name, iod_password)
except Exception as e:
warn_print_report(
"Failed to kill nic or ib traffic process in [%s] due to:\n%s" %
(iod_name, e))
error_report(traceback.print_exc())
else:
info_print_report("Killed the nic or ib traffic process "
"in [%s] success" % iod_name)
time.sleep(30)
# delete the nic ipmp group and interfaces in io domain
nic_pf_list = [nic_pf_1, nic_pf_2]
try:
info_print_report("Deleting the NIC ipmp group and interfaces "
"in io domain [%s]" % iod_name)
delete_nic_interface_in_domain(iod_name, iod_password, nic_pf_list)
except Exception as e:
warn_print_report(
"Failed to delete the NIC ipmp and interfaces in [%s]"
" due to:\n%s" % (iod_name, e))
error_report(traceback.print_exc())
else:
info_print_report("Deleted the NIC ipmp and interfaces "
"in [%s] success" % iod_name)
# delete the vnic in remote host
try:
info_print_report("Deleting the vnic in remote host [%s]" % nic_rmt_name)
delete_remote_vnic(nic_rmt_name, nic_rmt_password)
except Exception as e:
warn_print_report(
"Failed to delete the vnic in remote host [%s] due to:\n%s" %
(nic_rmt_name, e))
error_report(traceback.print_exc())
else:
info_print_report("Deleted the vnic in remote host "
"[%s] success" % nic_rmt_name)
# delete the ib ipmp group and links in io domain
ib_pf_list = [ib_pf_1, ib_pf_2]
try:
info_print_report("Deleting the IB ipmp group and links "
"in io domain [%s]" % iod_name)
delete_ib_part_in_domain(iod_name, iod_password, ib_pf_list)
except Exception as e:
warn_print_report(
"Failed to delete the IB ipmp and links in [%s] "
"due to:\n%s" % (iod_name, e))
error_report(traceback.print_exc())
else:
info_print_report("Deleted the IB ipmp and interfaces "
"in [%s] success" % iod_name)
# delete the IB links in remote host
try:
info_print_report("Deleting the ib links in "
"remote host [%s]" % ib_rmt_name)
delete_remote_ib_part(ib_rmt_name, ib_rmt_password)
except Exception as e:
warn_print_report(
"Failed to delete the ib links in remote host [%s] "
"due to:\n%s" % (nic_rmt_name, e))
error_report(traceback.print_exc())
else:
info_print_report("Deleted the ib links in remote host "
"[%s] success" % ib_rmt_name)
# destroy all the vfs that have been created on nic pfs
for pf in nic_pf_list:
try:
info_print_report(
"Destroying the VFs created on [%s] in this test case" % pf)
destroy_all_nic_vfs_on_pf(iod_name, iod_password, pf)
except Exception as e:
warn_print_report(
"Failed to destroy all the vfs created on [%s] due to:\n%s" % (
pf, e))
error_report(traceback.print_exc())
else:
info_print_report(
"Destroyed all the VFs created on [%s] in this test case" % pf)
# destroy all the vfs that have been created on ib pfs
for pf in ib_pf_list:
try:
info_print_report(
"Destroying the VFs created on [%s] in this test case" % pf)
destroy_all_ib_vfs_on_pf(iod_name, iod_password, pf)
except Exception as e:
warn_print_report(
"Failed to destroy all the vfs created on [%s] due to:\n%s" % (
pf, e))
error_report(traceback.print_exc())
else:
info_print_report(
"Destroyed all the VFs created on [%s] in this test case" % pf)
# save the related logs created in this test case
try:
info_print_report(
"Saving related log files of this test case")
save_related_logs("nicib")
except Exception as e:
warn_print_report(
"Failed to save related log files due to:\n%s" % e)
else:
info_print_report('Test user could review the "related_logs" '
'in result path')
#
# construct the test list
# NOTE: The values in this dictionary are functions, not strings
#
test_list = {}
test_list[1] = tp_nicib_001
test_list[2] = tp_nicib_002
# Initialize the test
ctiutils.cti_init(test_list, startup, cleanup)
|
[
"alertbear@163.com"
] |
alertbear@163.com
|
8599f545f32a227a0b40dd5d25d0ed379f8a54b8
|
28acc05d2d5f19427f9936a34b3d6cabb5fba6c0
|
/CHIpy-8/v_registers.py
|
6de8b75beeb16c9321b02dffa4270ffadfaf626e
|
[
"MIT"
] |
permissive
|
HERCULESxp/CHIpy-8
|
f2bb9e01f793de6f4c0e97a8cdfedebb85d8e5bc
|
0a58b6af4ea3f8a9cab7de0f1c195674c7c77782
|
refs/heads/main
| 2023-06-19T16:59:36.870982
| 2021-07-06T12:02:22
| 2021-07-06T12:02:22
| 303,285,334
| 1
| 0
|
MIT
| 2020-11-01T01:55:01
| 2020-10-12T05:20:14
|
Python
|
UTF-8
|
Python
| false
| false
| 504
|
py
|
from unsignedbitsarray import UnsignedBitsArray
class VRegisters:
def __init__(self):
self.v = UnsignedBitsArray(8, 16) # creates the register memory: 16 empty 8-bit cells
def WriteValue(self, idx, value): # writes data into the register
self.v[idx] = value
def ReadValue(self, idx): # reads data from the register
return self.v[idx]
def ClearRegister(self): # empties the register
self.v = UnsignedBitsArray(8, 16) # assumed behavior: reinitialize all 16 cells
def ShowValues(self): # shows the register values (for debugging)
print(self.v)
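# Minimal usage sketch (assumes UnsignedBitsArray behaves like a fixed-width list):
# regs = VRegisters()
# regs.WriteValue(0x0, 0xFF)
# regs.ReadValue(0x0) # -> 255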
|
[
"noreply@github.com"
] |
noreply@github.com
|
6cdc0727030d6b2c9b99f8bc0392fbf6d0932c81
|
cf10a610cdc789e329f5c9734b4dd0fa46050dbf
|
/automated_weather_shopping/winter_shopping.py
|
bc9ad5a44fc2288ab887ec095437409b4e1e26c7
|
[] |
no_license
|
rohit679/selenium_scripts
|
7c5dc520a00ff89ec24d0e7b3bb20dbbc0cbf1bf
|
5b00f33b58acd9f119eb4cefa5f67e4e125c16d8
|
refs/heads/master
| 2022-11-24T10:11:03.868104
| 2020-07-24T12:48:02
| 2020-07-24T12:48:02
| 282,218,070
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,387
|
py
|
"""
This Python module finds the two least expensive products, where
one contains Aloe and the other contains Almond.
SCOPE:
1) Launch Chrome Driver
2) Navigate to site 'weathershopper.pythonanywhere.com/moisturizer'
3) Find the products that satisfy our base condition
4) Click the 'Add' button of each desired product
5) Click the 'Go to cart' button
6) Close the browser
"""
import time
from selenium import webdriver
def price_filteration(raw_price):
"""
price_filteration takes a price in text format, strips out the
surrounding text, and returns the numeric price as an int.
"""
price = raw_price.split("Price:")[-1]
price = price.split("Rs.")[-1]
price = int(price)
return price
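# Example with a hypothetical price string:
# price_filteration("Price: Rs. 250") -> 250
# (int() tolerates the leading space left by the second split)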
def find_minimum_price(price_list):
"""
find_minimum_price takes a price list, finds the smallest price,
and returns it.
"""
minimum_price = float("inf") # start above any possible price instead of a hard-coded cap
for price in price_list:
if price < minimum_price:
minimum_price = price
return minimum_price
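# Example: find_minimum_price([300, 120, 450]) -> 120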
def price_list_generator(product_list):
"""
price_list_generator takes the raw product price elements,
extracts the numeric price from each, and returns the list.
"""
price_list = []
for cost in product_list:
price = cost.text
filtered_price = price_filteration(price)
price_list.append(filtered_price)
return price_list
def aloe_price(driver):
"""
aloe_price takes the driver as an argument, collects all the
Aloe product prices, and returns them.
"""
product_list = driver.find_elements_by_xpath("//*[contains(text(),'Aloe')]/following-sibling::p")
generated_price = price_list_generator(product_list)
return generated_price
def almond_price(driver):
"""
almond_price takes the driver as an argument, collects all the
Almond product prices, and returns them.
"""
product_list = driver.find_elements_by_xpath("//*[contains(text(),'Almond')]/following-sibling::p")
generated_price = price_list_generator(product_list)
return generated_price
def adding_aloe(driver):
"""
getting_aloe is a function that takes driver as the arguement,
finds the minimum Aloe product available and clicks to the
respective 'add' button.
"""
aloe_price_list = aloe_price(driver)
minimum_price = find_minimum_price(aloe_price_list)
# Clicking the Add button of the least expensive product having Aloe
driver.find_element_by_xpath("//div[contains(@class,'col-4') and contains(.,'{}')]\
/descendant::button[text()='Add']".format(str(minimum_price))).click()
print("Clicked the Add button of the least expensive product having Aloe")
def adding_almond(driver):
"""
adding_almond takes the driver as an argument, finds the cheapest
Almond product available, and clicks its 'Add' button.
"""
almond_price_list = almond_price(driver)
minimum_price = find_minimum_price(almond_price_list)
# Clicking the Add button of the least expensive product having Almond
driver.find_element_by_xpath("//div[contains(@class,'col-4') and contains(.,'{}')]\
/descendant::button[text()='Add']".format(str(minimum_price))).click()
print("Clicked the Add button of the least expensive product having Almond")
" Driver code starts here "
if __name__ == "__main__":
# Creating driver
driver = webdriver.Chrome()
driver.get('https://weathershopper.pythonanywhere.com/moisturizer')
if driver.title == "The Best Moisturizers in the World!":
print("Success: Navigation successful")
else:
print("Failed: page title is incorrect")
time.sleep(5)
# Calling function to add the respective product into the cart
adding_aloe(driver)
adding_almond(driver)
go_to_cart_button = driver.find_element_by_xpath("//button[contains(text(),'Cart')]")
go_to_cart_button.click()
print("Clicked go to cart button successfully")
time.sleep(3)
# Closing the browser
driver.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
5b23ea9398ef63c8d1726172c1aad40d7b1c2595
|
32e1e5ad1f24bab35e72ede517deec13154512c5
|
/cropping_package/CroppingImplementation/utils.py
|
86f7f973fb871c2d941dc8d251ed4cb5aac4731e
|
[] |
no_license
|
sondrtha/ImageCropping
|
f072b9e5e506a50bc9972c8357271075f2870bfb
|
817a9ca21c2c3ed8636d5017028cd7b285c90510
|
refs/heads/master
| 2023-08-31T10:56:29.112864
| 2021-10-18T15:01:44
| 2021-10-18T15:01:44
| 326,507,898
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
from win32api import GetSystemMetrics # is used to get your screen resolution
def get_screen_resolution():
screen_resolution_width = GetSystemMetrics(0)
screen_resolution_height = GetSystemMetrics(1)
return (screen_resolution_width, screen_resolution_height)
|
[
"sondre_H_93@hotmail.com"
] |
sondre_H_93@hotmail.com
|
ddfd6d214af241497a8523161e43f40420496bea
|
5fc2e6c28853b5c3f5df65048714fe3252a69cd6
|
/python/Tools/cutSamples.py
|
6f529c1c80da99470665ccc699cbc74ff0dfbb20
|
[] |
no_license
|
zaixingmao/H2hh2bbTauTau
|
2f5fec23546a90f0055798147e8fd762307bf44f
|
44b114ce48419b502fa233ddcb8d06eb0e5219f7
|
refs/heads/master
| 2016-09-05T13:18:21.214825
| 2014-09-17T12:52:59
| 2014-09-17T12:52:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,778
|
py
|
#!/usr/bin/env python
import ROOT as r
import tool
from operator import itemgetter
import os
import enVars
from array import array
import optparse
import math
import varsList
import kinfit
r.gROOT.SetBatch(True)
r.gErrorIgnoreLevel = 2000
r.gStyle.SetOptStat("e")
xLabels = ['processedEvents', 'PATSkimmedEvents',
'eTau',"eleTausEleID", "eleTausEleConvRej", "eleTausElePtEta",
"eleTausTauPtEta", "eleTausDecayFound", "eleTausVLooseIsolation",
"eleTausTauMuonVeto", "eleTausTauElectronVeto", "eleTausTauElectronVetoM",
"eleTausEleIsolation", "eleTausLooseIsolation", "eleTauOS",
"muTau", "muTauId", "muTausMuonPtEta", "muTausTauPtEta", "muTausDecayFound",
"muTausVLooseTauIsolation", "muTausTauElectronVeto", "muTausTauMuonVeto",
"muTausMuonIsolation", "muTausLooseTauIsolation", "muTausLooseIsolation", "muTausOS",
'atLeastOneDiTau', 'ptEta1', 'ptEta2', 'tau1Hadronic',
'tau2Hadronic','muonVeto1', 'muonVeto2', 'eleVeto1', 'eleVeto2', 'isolation1', 'relaxed', 'myCut']
lvClass = r.Math.LorentzVector(r.Math.PtEtaPhiM4D('double'))
J1 = lvClass()
J2 = lvClass()
J3 = lvClass()
J4 = lvClass()
matchedGenJet = lvClass()
mGenJet1 = lvClass()
mGenJet2 = lvClass()
CSVJet1 = lvClass()
CSVJet2 = lvClass()
tau1 = lvClass()
tau2 = lvClass()
combinedJJ = lvClass()
sv4Vec = lvClass()
#Setup Kinematic Fit
kinfit.setup(path="/afs/hep.wisc.edu/home/zmao/myScripts/H2hh2bbTauTau/python/HHKinFit",
lib="libHHKinFit.so",)
def calcTrigOneTauEff(eta, pt, data = True, fitStart=25):
le14_da = {20: (0.898, 44.3, 1.02),
25: (0.866, 43.1, 0.86),
30: (0.839, 42.3, 0.73),
35: (0.846, 42.4, 0.78),
}
le14_mc = {20: (0.837, 43.6, 1.09),
25: (0.832, 40.4, 0.80),
30: (0.829, 40.4, 0.74),
35: (0.833, 40.1, 0.86),
}
ge16_da = {20: (0.81, 43.6, 1.09),
25: (0.76, 41.8, 0.86),
30: (0.74, 41.2, 0.75),
35: (0.74, 41.2, 0.79),
}
ge16_mc = {20: (0.70, 39.7, 0.95),
25: (0.69, 38.6, 0.74),
30: (0.69, 38.7, 0.61),
35: (0.69, 38.8, 0.61),
}
le14 = le14_da if data else le14_mc
ge16 = ge16_da if data else ge16_mc
if abs(eta) < 1.4:
d = le14
else:
d = ge16
e, x0, sigma = d[fitStart]
y = r.TMath.Erf((pt-x0)/2.0/sigma/math.sqrt(pt)) # https://github.com/rmanzoni/HTT/blob/master/CMGTools/H2TauTau/interface/TriggerEfficiency.h
#y = r.TMath.Erf((pt-x0)/sigma/math.sqrt(2.0))
return (1+y)*e/2.0
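# Worked example using the tables above: a barrel tau (|eta| < 1.4) at pt = 45 GeV
# in data with fitStart=25 uses (e, x0, sigma) = (0.866, 43.1, 0.86), giving
# eff = 0.866 * (1 + erf((45 - 43.1) / (2 * 0.86 * sqrt(45)))) / 2.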
def opts():
parser = optparse.OptionParser()
parser.add_option("-l", dest="location", default="/scratch/zmao", help="location to be saved")
parser.add_option("-n", dest="nevents", default="-1", help="amount of events to be saved")
parser.add_option("-g", dest="genMatch", default="jet", help="gen particle for the reco-jet to match to")
parser.add_option("-a", dest="addFiles", default="False", help="")
options, args = parser.parse_args()
return options
options = opts()
def findFullMass(jetsList = [], sv4Vec = ''):
jetsList = sorted(jetsList, key=itemgetter(0), reverse=True)
combinedJJ = jetsList[0][1]+jetsList[1][1]
if jetsList[1][0] > 0 and jetsList[0][1].pt() > 30 and jetsList[1][1].pt() > 30 and abs(jetsList[0][1].eta()) < 2.4 and abs(jetsList[1][1].eta()) < 2.4:
return combinedJJ, jetsList[0][0], jetsList[1][0], jetsList[0][1], jetsList[1][1], (combinedJJ+sv4Vec).mass(), r.Math.VectorUtil.DeltaR(jetsList[0][1], jetsList[1][1]), jetsList[0][2], jetsList[1][2]
else:
return -1, -1, -1, -1, -1, -1, -1, -1, -1
def findGenJet(j1Name, jet1, j2Name, jet2, tChain):
genJet1 = lvClass()
genJet2 = lvClass()
genJet1.SetCoordinates(0,0,0,0)
genJet2.SetCoordinates(0,0,0,0)
if varsList.findVarInChain(tChain, '%sGenPt' %j1Name) > 0 and varsList.findVarInChain(tChain, '%sGenMass' %j1Name) > 0:
genJet1.SetCoordinates(varsList.findVarInChain(tChain, '%sGenPt' %j1Name),
varsList.findVarInChain(tChain, '%sGenEta' %j1Name),
varsList.findVarInChain(tChain, '%sGenPhi' %j1Name),
varsList.findVarInChain(tChain, '%sGenMass' %j1Name))
if varsList.findVarInChain(tChain, '%sGenPt' %j2Name) > 0 and varsList.findVarInChain(tChain, '%sGenMass' %j2Name) > 0:
genJet2.SetCoordinates(varsList.findVarInChain(tChain, '%sGenPt' %j2Name),
varsList.findVarInChain(tChain, '%sGenEta' %j2Name),
varsList.findVarInChain(tChain, '%sGenPhi' %j2Name),
varsList.findVarInChain(tChain, '%sGenMass' %j2Name))
dR1 = r.Math.VectorUtil.DeltaR(genJet1, jet1)
dR2 = r.Math.VectorUtil.DeltaR(genJet2, jet2)
return dR1, genJet1, dR2, genJet2
def findGenBJet(jet1, jet2, tChain):
genJet1 = lvClass()
genJet2 = lvClass()
genJet1.SetCoordinates(0,0,0,0)
genJet2.SetCoordinates(0,0,0,0)
tmpJet = lvClass()
tmpJet.SetCoordinates(0,0,0,0)
dR1 = 0.5
dR2 = 0.5
for i in range(tChain.genBPt.size()):
tmpJet.SetCoordinates(tChain.genBPt.at(i), tChain.genBEta.at(i), tChain.genBPhi.at(i), tChain.genBMass.at(i))
tmpDR1 = r.Math.VectorUtil.DeltaR(tmpJet, jet1)
if dR1 > tmpDR1:
dR1 = tmpDR1
genJet1.SetCoordinates(tChain.genBPt.at(i), tChain.genBEta.at(i), tChain.genBPhi.at(i), tChain.genBMass.at(i))
for i in range(tChain.genBPt.size()):
tmpJet.SetCoordinates(tChain.genBPt.at(i), tChain.genBEta.at(i), tChain.genBPhi.at(i), tChain.genBMass.at(i))
tmpDR2 = r.Math.VectorUtil.DeltaR(tmpJet, jet2)
if dR2 > tmpDR2 and genJet1 != tmpJet:
dR2 = tmpDR2
genJet2.SetCoordinates(tChain.genBPt.at(i), tChain.genBEta.at(i), tChain.genBPhi.at(i), tChain.genBMass.at(i))
if genJet1 == genJet2:
print ' WARNING:: Matched to the same b quark (b mass = %.3f)' %genJet2.mass()
return dR1, genJet1, dR2, genJet2
def getRegVars(jName, tChain):
jet = lvClass()
SoftLeptPt = 0
jet.SetCoordinates(varsList.findVarInChain_Data(tChain, '%sPt' %jName), varsList.findVarInChain_Data(tChain,'%sEta' %jName),
varsList.findVarInChain_Data(tChain, '%sPhi' %jName), 0)
if varsList.findVarInChain_Data(tChain,'%sSoftLeptPID' %jName) == 0:
SoftLeptPtRel = 0
SoftLeptdR = 0
else:
SoftLeptPtRel = varsList.findVarInChain_Data(tChain,'%sPt' %jName) - varsList.findVarInChain_Data(tChain,'%sSoftLeptPt' %jName)
softLept = lvClass()
softLept.SetCoordinates(varsList.findVarInChain_Data(tChain, '%sSoftLeptPt' %jName), varsList.findVarInChain_Data(tChain,'%sSoftLeptEta' %jName),
varsList.findVarInChain_Data(tChain, '%sSoftLeptPhi' %jName), 0)
SoftLeptdR = r.Math.VectorUtil.DeltaR(softLept, jet)
SoftLeptPt = varsList.findVarInChain_Data(tChain, '%sSoftLeptPt' %jName)
if SoftLeptPt < 0:
SoftLeptPt = 0
return varsList.findVarInChain_Data(tChain, '%sPtUncorr' %jName), varsList.findVarInChain_Data(tChain, '%sEt' %jName), varsList.findVarInChain_Data(tChain, '%sMt' %jName), varsList.findVarInChain_Data(tChain, '%sptLeadTrk' %jName), varsList.findVarInChain_Data(tChain, '%sVtx3dL' %jName),varsList.findVarInChain_Data(tChain, '%sVtx3deL' %jName), varsList.findVarInChain_Data(tChain, '%svtxMass' %jName), varsList.findVarInChain_Data(tChain, '%sVtxPt' %jName), varsList.findVarInChain_Data(tChain, '%sJECUnc' %jName), float(varsList.findVarInChain_Data(tChain, '%sNtot' %jName)), SoftLeptPtRel, SoftLeptPt, SoftLeptdR
def setDPhiInRange(dPhi):
if dPhi > 3.14:
return 6.283-dPhi
else:
return dPhi
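# Example: setDPhiInRange(5.0) returns 6.283 - 5.0 = 1.283, folding an
# azimuthal difference back into the [0, pi] range.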
def calcdPhiMetValues(tau1Phi, tau2Phi, j1Phi, j2Phi, metPhi, tauTauPhi, jjPhi, svPhi):
return setDPhiInRange(abs(tau1Phi - metPhi)), setDPhiInRange(abs(tau2Phi - metPhi)), setDPhiInRange(abs(j1Phi - metPhi)), setDPhiInRange(abs(j2Phi - metPhi)), setDPhiInRange(abs(tauTauPhi - metPhi)), setDPhiInRange(abs(jjPhi - metPhi)), setDPhiInRange(abs(svPhi - metPhi))
r.gStyle.SetOptStat(0)
signalEntries = enVars.signalEntries
ttEntries = enVars.ttEntries
ZZEntries = enVars.ZZEntries
#*******Get Sample Name and Locations******
sampleLocations = enVars.sampleLocations
preVarList = ['EVENT', 'HMass', 'svMass', 'svPt', 'svEta', 'svPhi', 'J1Pt', 'J1Eta','J1Phi', 'J1Mass', 'NBTags', 'iso1', 'iso2', 'mJJ', 'J2Pt', 'J2Eta','J2Phi', 'J2Mass','pZeta', 'pZ', 'm1', 'm2',
'pZV', 'J3Pt', 'J3Eta','J3Phi', 'J3Mass', 'J4Pt', 'J4Eta','J4Phi', 'J4Mass', 'J1CSVbtag', 'J2CSVbtag', 'J3CSVbtag', 'J4CSVbtag', 'pt1', 'eta1', 'phi1', 'pt2', 'eta2', 'phi2', 'met',
'charge1', 'charge2', 'metphi',
'J1PtUncorr', 'J1VtxPt', 'J1Vtx3dL', 'J1Vtx3deL', 'J1ptLeadTrk', 'J1vtxMass', 'J1vtxPt', 'J1Ntot',
'J1SoftLepPt', 'J1SoftLepEta', 'J1SoftLepPhi', 'J1SoftLepPID', 'J1JECUnc', 'J1Et', 'J1Mt',
'J2PtUncorr', 'J2VtxPt', 'J2Vtx3dL', 'J2Vtx3deL', 'J2ptLeadTrk', 'J2vtxMass', 'J2vtxPt', 'J2Ntot',
'J2SoftLepPt', 'J2SoftLepEta', 'J2SoftLepPhi', 'J2SoftLepPID', 'J2JECUnc', 'J2Et', 'J2Mt',
'J3PtUncorr', 'J3VtxPt', 'J3Vtx3dL', 'J3Vtx3deL', 'J3ptLeadTrk', 'J3vtxMass', 'J3vtxPt', 'J3Ntot',
'J3SoftLepPt', 'J3SoftLepEta', 'J3SoftLepPhi', 'J3SoftLepPID', 'J3JECUnc', 'J3Et', 'J3Mt',
'J4PtUncorr', 'J4VtxPt', 'J4Vtx3dL', 'J4Vtx3deL', 'J4ptLeadTrk', 'J4vtxMass', 'J4vtxPt', 'J4Ntot',
'J4SoftLepPt', 'J4SoftLepEta', 'J4SoftLepPhi', 'J4SoftLepPID', 'J4JECUnc', 'J4Et', 'J4Mt', 'tauDecayMode1', 'tauDecayMode2',
'mvacov00','mvacov01','mvacov10','mvacov11', 'byIsolationMVA2raw_1', 'byIsolationMVA2raw_2'
]
genVarList = ['genBPt', 'genBEta', 'genBPhi','genBMass', 'genTauPt', 'genTauEta', 'genTauPhi', 'genElePt', 'genEleEta',
'genElePhi', 'genMuPt', 'genMuEta', 'genMuPhi','J1GenPt', 'J1GenEta', 'J1GenPhi', 'J1GenMass',
'J2GenPt', 'J2GenEta', 'J2GenPhi', 'J2GenMass', 'J3GenPt', 'J3GenEta', 'J3GenPhi', 'J3GenMass',
'J4GenPt', 'J4GenEta', 'J4GenPhi', 'J4GenMass']
fullVarList = []
for iVar in preVarList:
fullVarList.append(iVar)
for iVar in genVarList:
fullVarList.append(iVar)
blackList = enVars.corruptedROOTfiles
for iSample, iLocation in sampleLocations:
if 'data' in iSample:
isData = True
varList = preVarList
else:
isData = False
varList = fullVarList
cutFlow = r.TH1F('cutFlow', '', len(xLabels), 0, len(xLabels))
if options.addFiles == 'True':
tool.addHistFromFiles(dirName=iLocation, histName = "preselection", hist = cutFlow, xAxisLabels=xLabels)
else:
tool.addHistFromFiles(dirName=iLocation, histName = "TT/results", hist = cutFlow, xAxisLabels=xLabels)
cutFlow.SetName('preselection')
if options.addFiles == 'True':
iChain = r.TChain("eventTree")
else:
iChain = r.TChain("ttTreeBeforeChargeCut/eventTree")
nEntries = tool.addFiles(ch=iChain, dirName=iLocation, knownEventNumber=signalEntries, printTotalEvents=True, blackList=blackList)
iChain.SetBranchStatus("*",0)
for iVar in range(len(varList)):
iChain.SetBranchStatus(varList[iVar],1)
fullMass = array('f', [0.])
mJJ = array('f', [0.])
ptJJ = array('f', [0.])
etaJJ = array('f', [0.])
phiJJ = array('f', [0.])
CSVJ1 = array('f', [0.])
CSVJ1Pt = array('f', [0.])
CSVJ1Eta = array('f', [0.])
CSVJ1Phi = array('f', [0.])
CSVJ1Mass = array('f', [0.])
CSVJ2 = array('f', [0.])
CSVJ2Pt = array('f', [0.])
CSVJ2Eta = array('f', [0.])
CSVJ2Phi = array('f', [0.])
CSVJ2Mass = array('f', [0.])
dRTauTau = array('f', [0.])
dRJJ = array('f', [0.])
dRhh = array('f', [0.])
mTop1 = array('f', [0.])
mTop2 = array('f', [0.])
pZ_new = array('f', [0.])
pZV_new = array('f', [0.])
pZ_new2 = array('f', [0.])
pZV_new2 = array('f', [0.])
triggerEff = array('f', [0.])
triggerEff1 = array('f', [0.])
triggerEff2 = array('f', [0.])
metTau1DPhi = array('f', [0.])
metTau2DPhi = array('f', [0.])
metJ1DPhi = array('f', [0.])
metJ2DPhi = array('f', [0.])
metTauPairDPhi = array('f', [0.])
metJetPairDPhi = array('f', [0.])
metSvTauPairDPhi = array('f', [0.])
dRGenJet1Match = array('f', [0.])
dRGenJet2Match = array('f', [0.])
matchGenJet1Pt = array('f', [0.])
matchGenJet1Eta = array('f', [0.])
matchGenJet1Phi = array('f', [0.])
matchGenJet1Mass = array('f', [0.])
matchGenJet2Pt = array('f', [0.])
matchGenJet2Eta = array('f', [0.])
matchGenJet2Phi = array('f', [0.])
matchGenJet2Mass = array('f', [0.])
matchGenMJJ = array('f', [0.])
matchGenPtJJ = array('f', [0.])
matchGendRJJ = array('f', [0.])
CSVJ1PtUncorr = array('f', [0.])
CSVJ1Et = array('f', [0.])
CSVJ1Mt = array('f', [0.])
CSVJ1ptLeadTrk = array('f', [0.])
CSVJ1Vtx3dL = array('f', [0.])
CSVJ1Vtx3deL = array('f', [0.])
CSVJ1vtxMass = array('f', [0.])
CSVJ1VtxPt = array('f', [0.])
CSVJ1JECUnc = array('f', [0.])
CSVJ1Ntot = array('f', [0.])
CSVJ1SoftLeptPtRel = array('f', [0.])
CSVJ1SoftLeptPt = array('f', [0.])
CSVJ1SoftLeptdR = array('f', [0.])
CSVJ2PtUncorr = array('f', [0.])
CSVJ2Et = array('f', [0.])
CSVJ2Mt = array('f', [0.])
CSVJ2ptLeadTrk = array('f', [0.])
CSVJ2Vtx3dL = array('f', [0.])
CSVJ2Vtx3deL = array('f', [0.])
CSVJ2vtxMass = array('f', [0.])
CSVJ2VtxPt = array('f', [0.])
CSVJ2JECUnc = array('f', [0.])
CSVJ2Ntot = array('f', [0.])
CSVJ2SoftLeptPtRel = array('f', [0.])
CSVJ2SoftLeptPt = array('f', [0.])
CSVJ2SoftLeptdR = array('f', [0.])
chi2KinFit = array('f', [0.])
fMassKinFit = array('f', [0.])
iChain.LoadTree(0)
iTree = iChain.GetTree().CloneTree(0)
iSample = iSample + '_%s' %('all' if options.nevents == "-1" else options.nevents)
iFile = r.TFile("%s/%s.root" %(options.location,iSample),"recreate")
iTree.Branch("fMass", fullMass, "fMass/F")
iTree.Branch("mJJ", mJJ, "mJJ/F")
iTree.Branch("etaJJ", etaJJ, "etaJJ/F")
iTree.Branch("phiJJ", phiJJ, "phiJJ/F")
iTree.Branch("ptJJ", ptJJ, "ptJJ/F")
iTree.Branch("CSVJ1", CSVJ1, "CSVJ1/F")
iTree.Branch("CSVJ1Pt", CSVJ1Pt, "CSVJ1Pt/F")
iTree.Branch("CSVJ1Eta", CSVJ1Eta, "CSVJ1Eta/F")
iTree.Branch("CSVJ1Phi", CSVJ1Phi, "CSVJ1Phi/F")
iTree.Branch("CSVJ1Mass", CSVJ1Mass, "CSVJ1Mass/F")
iTree.Branch("CSVJ2", CSVJ2, "CSVJ2/F")
iTree.Branch("CSVJ2Pt", CSVJ2Pt, "CSVJ2Pt/F")
iTree.Branch("CSVJ2Eta", CSVJ2Eta, "CSVJ2Eta/F")
iTree.Branch("CSVJ2Phi", CSVJ2Phi, "CSVJ2Phi/F")
iTree.Branch("CSVJ2Mass", CSVJ2Mass, "CSVJ2Mass/F")
iTree.Branch("dRTauTau", dRTauTau, "dRTauTau/F")
iTree.Branch("dRJJ", dRJJ, "dRJJ/F")
iTree.Branch("dRhh", dRhh, "dRhh/F")
iTree.Branch("mTop1", mTop1, "mTop1/F")
iTree.Branch("mTop2", mTop2, "mTop2/F")
iTree.Branch("pZ_new", pZ_new, "pZ_new/F")
iTree.Branch("pZV_new", pZV_new, "pZV_new/F")
iTree.Branch("pZ_new2", pZ_new2, "pZ_new2/F")
iTree.Branch("pZV_new2", pZV_new2, "pZV_new2/F")
iTree.Branch("triggerEff", triggerEff, "triggerEff/F")
iTree.Branch("triggerEff1", triggerEff1, "triggerEff1/F")
iTree.Branch("triggerEff2", triggerEff2, "triggerEff2/F")
iTree.Branch("metTau1DPhi", metTau1DPhi, "metTau1DPhi/F")
iTree.Branch("metTau2DPhi", metTau2DPhi, "metTau2DPhi/F")
iTree.Branch("metJ1DPhi", metJ1DPhi, "metJ1DPhi/F")
iTree.Branch("metJ2DPhi", metJ2DPhi, "metJ2DPhi/F")
iTree.Branch("metTauPairDPhi", metTauPairDPhi, "metTauPairDPhi/F")
iTree.Branch("metJetPairDPhi", metJetPairDPhi, "metJetPairDPhi/F")
iTree.Branch("metSvTauPairDPhi", metSvTauPairDPhi, "metSvTauPairDPhi/F")
iTree.Branch("chi2KinFit", chi2KinFit, "chi2KinFit/F")
iTree.Branch("fMassKinFit", fMassKinFit, "fMassKinFit/F")
if not isData:
iTree.Branch("dRGenJet1Match", dRGenJet1Match, "dRGenJet1Match/F")
iTree.Branch("dRGenJet2Match", dRGenJet2Match, "dRGenJet2Match/F")
iTree.Branch("matchGenJet1Pt", matchGenJet1Pt, "matchGenJet1Pt/F")
iTree.Branch("matchGenJet1Eta", matchGenJet1Eta, "matchGenJet1Eta/F")
iTree.Branch("matchGenJet1Phi", matchGenJet1Phi, "matchGenJet1Phi/F")
iTree.Branch("matchGenJet1Mass", matchGenJet1Mass, "matchGenJet1Mass/F")
iTree.Branch("matchGenJet2Pt", matchGenJet2Pt, "matchGenJet2Pt/F")
iTree.Branch("matchGenJet2Eta", matchGenJet2Eta, "matchGenJet2Eta/F")
iTree.Branch("matchGenJet2Phi", matchGenJet2Phi, "matchGenJet2Phi/F")
iTree.Branch("matchGenJet2Mass", matchGenJet2Mass, "matchGenJet2Mass/F")
iTree.Branch("matchGenMJJ", matchGenMJJ, "matchGenMJJ/F")
iTree.Branch("matchGenPtJJ", matchGenPtJJ, "matchGenPtJJ/F")
iTree.Branch("matchGendRJJ", matchGendRJJ, "matchGendRJJ/F")
iTree.Branch("CSVJ1PtUncorr",CSVJ1PtUncorr,"CSVJ1PtUncorr/F")
iTree.Branch("CSVJ1Et",CSVJ1Et,"CSVJ1Et/F")
iTree.Branch("CSVJ1Mt",CSVJ1Mt,"CSVJ1Mt/F")
iTree.Branch("CSVJ1ptLeadTrk",CSVJ1ptLeadTrk,"CSVJ1ptLeadTrk/F")
iTree.Branch("CSVJ1Vtx3dL",CSVJ1Vtx3dL,"CSVJ1Vtx3dL/F")
iTree.Branch("CSVJ1Vtx3deL",CSVJ1Vtx3deL,"CSVJ1Vtx3deL/F")
iTree.Branch("CSVJ1vtxMass",CSVJ1vtxMass,"CSVJ1vtxMass/F")
iTree.Branch("CSVJ1VtxPt",CSVJ1VtxPt,"CSVJ1VtxPt/F")
iTree.Branch("CSVJ1JECUnc",CSVJ1JECUnc,"CSVJ1JECUnc/F")
iTree.Branch("CSVJ1Ntot",CSVJ1Ntot,"CSVJ1Ntot/F")
iTree.Branch("CSVJ1SoftLeptPtRel",CSVJ1SoftLeptPtRel,"CSVJ1SoftLeptPtRel/F")
iTree.Branch("CSVJ1SoftLeptPt",CSVJ1SoftLeptPt,"CSVJ1SoftLeptPt/F")
iTree.Branch("CSVJ1SoftLeptdR",CSVJ1SoftLeptdR,"CSVJ1SoftLeptdR/F")
iTree.Branch("CSVJ2PtUncorr",CSVJ2PtUncorr,"CSVJ2PtUncorr/F")
iTree.Branch("CSVJ2Et",CSVJ2Et,"CSVJ2Et/F")
iTree.Branch("CSVJ2Mt",CSVJ2Mt,"CSVJ2Mt/F")
iTree.Branch("CSVJ2ptLeadTrk",CSVJ2ptLeadTrk,"CSVJ2ptLeadTrk/F")
iTree.Branch("CSVJ2Vtx3dL",CSVJ2Vtx3dL,"CSVJ2Vtx3dL/F")
iTree.Branch("CSVJ2Vtx3deL",CSVJ2Vtx3deL,"CSVJ2Vtx3deL/F")
iTree.Branch("CSVJ2vtxMass",CSVJ2vtxMass,"CSVJ2vtxMass/F")
iTree.Branch("CSVJ2VtxPt",CSVJ2VtxPt,"CSVJ2VtxPt/F")
iTree.Branch("CSVJ2JECUnc",CSVJ2JECUnc,"CSVJ2JECUnc/F")
iTree.Branch("CSVJ2Ntot",CSVJ2Ntot,"CSVJ2Ntot/F")
iTree.Branch("CSVJ2SoftLeptPtRel",CSVJ2SoftLeptPtRel,"CSVJ2SoftLeptPtRel/F")
iTree.Branch("CSVJ2SoftLeptPt",CSVJ2SoftLeptPt,"CSVJ2SoftLeptPt/F")
iTree.Branch("CSVJ2SoftLeptdR",CSVJ2SoftLeptdR,"CSVJ2SoftLeptdR/F")
counter = 0
for iEntry in range(nEntries):
iChain.LoadTree(iEntry)
iChain.GetEntry(iEntry)
if counter == int(options.nevents):
break
if iChain.svMass.size() == 0:
continue
if not tool.calc(iChain):
continue
# if iChain.charge1.at(0) - iChain.charge2.at(0) == 0: #sign requirement
# continue
if iChain.pt1.at(0)<45 or iChain.pt2.at(0)<45: #pt cut
continue
if abs(iChain.eta1.at(0))>2.1 or abs(iChain.eta2.at(0))>2.1: #eta cut
continue
# if iChain.iso1.at(0)<1.5 or iChain.iso2.at(0)<1.5: #iso cut
# continue
jetsList = [(iChain.J1CSVbtag, J1.SetCoordinates(iChain.J1Pt, iChain.J1Eta, iChain.J1Phi, iChain.J1Mass), 'J1'),
(iChain.J2CSVbtag, J2.SetCoordinates(iChain.J2Pt, iChain.J2Eta, iChain.J2Phi, iChain.J2Mass), 'J2'),
(iChain.J3CSVbtag, J3.SetCoordinates(iChain.J3Pt, iChain.J3Eta, iChain.J3Phi, iChain.J3Mass), 'J3'),
(iChain.J4CSVbtag, J4.SetCoordinates(iChain.J4Pt, iChain.J4Eta, iChain.J4Phi, iChain.J4Mass), 'J4')]
sv4Vec.SetCoordinates(iChain.svPt.at(0), iChain.svEta.at(0), iChain.svPhi.at(0), iChain.svMass.at(0))
bb = lvClass()
bb, CSVJ1[0], CSVJ2[0], CSVJet1, CSVJet2, fullMass[0], dRJJ[0], j1Name, j2Name = findFullMass(jetsList=jetsList, sv4Vec=sv4Vec)
if bb == -1:
continue
matchGenJet1Pt[0] = 0
matchGenJet2Pt[0] = 0
if not isData:
if options.genMatch == 'jet':
dRGenJet1Match[0], mGenJet1, dRGenJet2Match[0], mGenJet2 = findGenJet(j1Name, CSVJet1, j2Name, CSVJet2, iChain)
else:
dRGenJet1Match[0], mGenJet1, dRGenJet2Match[0], mGenJet2 = findGenBJet(CSVJet1, CSVJet2, iChain)
matchGenJet1Pt[0] = mGenJet1.pt()
matchGenJet1Eta[0] = mGenJet1.eta()
matchGenJet1Phi[0] = mGenJet1.phi()
matchGenJet1Mass[0] = mGenJet1.mass()
matchGenJet2Pt[0] = mGenJet2.pt()
matchGenJet2Eta[0] = mGenJet2.eta()
matchGenJet2Phi[0] = mGenJet2.phi()
matchGenJet2Mass[0] = mGenJet2.mass()
genJJ = mGenJet1 + mGenJet2
matchGenMJJ[0] = genJJ.mass()
matchGenPtJJ[0] = genJJ.pt()
matchGendRJJ[0] = r.Math.VectorUtil.DeltaR(mGenJet1, mGenJet2)
if matchGenMJJ[0] < 0:
matchGenMJJ[0] = 0
matchGenPtJJ[0] = 0
CSVJ1Pt[0] = CSVJet1.pt()
CSVJ1Eta[0] = CSVJet1.eta()
CSVJ1Phi[0] = CSVJet1.phi()
CSVJ1Mass[0] = CSVJet1.mass()
CSVJ2Pt[0] = CSVJet2.pt()
CSVJ2Eta[0] = CSVJet2.eta()
CSVJ2Phi[0] = CSVJet2.phi()
CSVJ2Mass[0] = CSVJet2.mass()
CSVJ1PtUncorr[0], CSVJ1Et[0], CSVJ1Mt[0], CSVJ1ptLeadTrk[0], CSVJ1Vtx3dL[0], CSVJ1Vtx3deL[0], CSVJ1vtxMass[0], CSVJ1VtxPt[0], CSVJ1JECUnc[0], CSVJ1Ntot[0], CSVJ1SoftLeptPtRel[0], CSVJ1SoftLeptPt[0], CSVJ1SoftLeptdR[0] = getRegVars(j1Name, iChain)
CSVJ2PtUncorr[0], CSVJ2Et[0], CSVJ2Mt[0], CSVJ2ptLeadTrk[0], CSVJ2Vtx3dL[0], CSVJ2Vtx3deL[0], CSVJ2vtxMass[0], CSVJ2VtxPt[0], CSVJ2JECUnc[0], CSVJ2Ntot[0], CSVJ2SoftLeptPtRel[0], CSVJ2SoftLeptPt[0], CSVJ2SoftLeptdR[0] = getRegVars(j2Name, iChain)
if CSVJ1Vtx3dL[0] == -10:
CSVJ1Vtx3dL[0] = 0
CSVJ1Vtx3deL[0] = 0
CSVJ1vtxMass[0] = 0
CSVJ1VtxPt[0] = 0
if CSVJ1ptLeadTrk[0] < 0:
CSVJ1ptLeadTrk[0] = 0
if CSVJ2Vtx3dL[0] == -10:
CSVJ2Vtx3dL[0] = 0
CSVJ2Vtx3deL[0] = 0
CSVJ2vtxMass[0] = 0
CSVJ2VtxPt[0] = 0
if CSVJ2ptLeadTrk[0] < 0:
CSVJ2ptLeadTrk[0] = 0
if CSVJ1SoftLeptPtRel[0] == -10:
CSVJ1SoftLeptPtRel[0] = 0
CSVJ1SoftLeptPt[0] = 0
if CSVJ2SoftLeptPtRel[0] == -10:
CSVJ2SoftLeptPtRel[0] = 0
CSVJ2SoftLeptPt[0] = 0
ptJJ[0] = bb.pt()
etaJJ[0] = bb.eta()
phiJJ[0] = bb.phi()
mJJ[0] = bb.mass()
tau1.SetCoordinates(iChain.pt1.at(0), iChain.eta1.at(0), iChain.phi1.at(0), iChain.m1.at(0))
tau2.SetCoordinates(iChain.pt2.at(0), iChain.eta2.at(0), iChain.phi2.at(0), iChain.m2.at(0))
mTop1[0] = (CSVJet1 + tau1).mass()
mTop2[0] = (CSVJet2 + tau2).mass()
pZ_new[0] = iChain.pZ/iChain.svPt.at(0)
pZV_new[0] = iChain.pZV/iChain.svPt.at(0)
pZ_new2[0] = iChain.pZ/fullMass[0]
pZV_new2[0] = iChain.pZV/fullMass[0]
dRTauTau[0] = r.Math.VectorUtil.DeltaR(tau1, tau2)
dRhh[0] = r.Math.VectorUtil.DeltaR(bb, sv4Vec)
metTau1DPhi[0], metTau2DPhi[0], metJ1DPhi[0], metJ2DPhi[0], metTauPairDPhi[0], metJetPairDPhi[0], metSvTauPairDPhi[0] = calcdPhiMetValues(iChain.phi1.at(0), iChain.phi2.at(0), CSVJet1.phi(), CSVJet2.phi(), iChain.metphi.at(0), (tau1+tau2).phi(), bb.phi(), iChain.svPhi.at(0))
eff1 = calcTrigOneTauEff(eta=iChain.eta1.at(0), pt=iChain.pt1.at(0), data = True, fitStart=25)
eff2 = calcTrigOneTauEff(eta=iChain.eta2.at(0), pt=iChain.pt2.at(0), data = True, fitStart=25)
triggerEff1[0] = eff1
triggerEff2[0] = eff2
triggerEff[0] = eff1*eff2
if isData:
triggerEff[0] = 1
triggerEff1[0] = 1
triggerEff2[0] = 1
#For Kinematic Fit
chi2KinFit[0], fMassKinFit[0] = kinfit.fit(iChain, CSVJet1, CSVJet2)
iTree.Fill()
counter += 1
tool.printProcessStatus(iEntry, nEntries, 'Saving to file %s.root' %(iSample))
print ' -- saved %d events' %(counter)
tool.addEventsCount2Hist(hist = cutFlow, count = counter, labelName = 'myCut')
iFile.cd()
cutFlow.Write()
iTree.Write()
iFile.Close()
|
[
"zaixing.mao@cern.ch"
] |
zaixing.mao@cern.ch
|
23cd20f6d2f55bff7d41ec47fb119325981ec932
|
ad5e973d7da2b3d8d212ce6530f9e626e6d7b68d
|
/api/tests.py
|
26fa2019c09773b507abc11942d7c054f6e2d72a
|
[
"MIT"
] |
permissive
|
mateusvictor/School-API
|
536c45d521ef37a6f2bdc9dc7222ac836c798336
|
eff0ff3ee8d07fc89e8688615ec685e3dbfc0478
|
refs/heads/main
| 2023-06-15T18:20:42.710653
| 2021-06-15T15:28:41
| 2021-06-15T15:28:41
| 371,870,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,012
|
py
|
from decimal import Decimal
from django.urls import reverse
from django.forms.models import model_to_dict
import json
from rest_framework import status
from rest_framework.test import APITestCase
from api.models import *
class ProfessorTests(APITestCase):
def test_create_professor(self):
"""
Ensure we can create a new professor object
"""
url = reverse('professor-list')
data = {
'person': {
'first_name': 'Mateus',
'last_name': 'Victor',
'date_of_birth': '2000-11-21',
'email': 'victors_@yahoo.com',
'address': {
'country': 'Brazil',
'state': 'Sao Paulo',
'city': 'Sao Paulo',
'street': 'Avenida Brasil, 2111',
'postal_code': '21232-009',
}
},
'salary': 9213.12,
'entry_year': 2000
}
response = self.client.post(url, data, format='json')
response_dict = dict(response.data)
professor_object = Professor.objects.get(pk=int(response_dict['id']))
person_object = professor_object.person
address_object = person_object.address
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(data['person']['first_name'], person_object.first_name)
self.assertEqual(data['person']['address']['street'], address_object.street)
self.assertEqual(Decimal(str(data['salary'])), professor_object.salary)
class StudentTests(APITestCase):
def test_create_student(self):
"""
Ensure we can create a new student object
"""
url = reverse('student-list')
data = {
'person': {
'first_name': 'Caio',
'last_name': 'Castro',
'date_of_birth': '1982-06-19',
'email': 'castroo@outlook.com',
'address': {
'country': 'Brazil',
'state': 'Sao Paulo',
'city': 'Sao Paulo',
'street': 'Avenida Brasil, 21',
'postal_code': '01132-901',
'complement': 'Apartamento 10'
}
}
}
response = self.client.post(url, data, format='json')
response_dict = dict(response.data)
student_object = Student.objects.get(pk=int(response_dict['id']))
person_object = student_object.person
address_object = person_object.address
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(data['person']['first_name'], person_object.first_name)
self.assertEqual(data['person']['address']['street'], address_object.street)
class CourseTests(APITestCase):
def test_create_course(self):
"""
Ensure we can create a new course object
"""
ProfessorTests.test_create_professor(self) # Creating a professor object
url = reverse('course-list')
data = {
'name': 'Data Structures Part II',
'description': 'Graphs, Trees, Binary Search Trees and more.',
'professor': 1
}
response = self.client.post(url, data, format='json')
response_dict = dict(response.data)
course_object = Course.objects.get(pk=int(response_dict['id']))
professor_object = Professor.objects.get(pk=response_dict['professor'])
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(data['name'], course_object.name)
self.assertEqual(data['description'], course_object.description)
self.assertEqual(data['professor'], professor_object.id)
self.assertEqual(response_dict['course_instances'], [])
self.assertEqual(response_dict['students_count'], 0)
class EnrollTests(APITestCase):
def test_enroll_student_in_course(self):
"""
Ensure we can enroll a student in a course
"""
StudentTests.test_create_student(self)
CourseTests.test_create_course(self)
url = reverse('enroll')
data = {
'course': 1,
'student': 1
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_unenroll_student_in_course(self):
"""
Ensure we can unenroll a student in a course
"""
EnrollTests.test_enroll_student_in_course(self)
url = reverse('unenroll')
data = {
'course': 1,
'student': 1
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
[
"mateus_victors@outlook.com"
] |
mateus_victors@outlook.com
|
dfdf08871a317bd868aaf96af910a062abbb121c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03005/s702594618.py
|
664af542c17dd1cc55af2decc0b25ecea2d985cf
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
n, k = map(int, input().split())
if k == 1:
print(0)
else:
print(n - k)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1158f74e6b942c69297d5244445530ce24e8d271
|
e3968e2117ce57631659f16a0c5bf1ffbcdb44c8
|
/configuration.py
|
09b1799a80e06f02b8baab3b5c16425c3c7326de
|
[
"MIT"
] |
permissive
|
PAndaContron/rucs24-bot
|
fa0c767c68ad5f28ec28406929fb65f85dca81f7
|
96b6f8564154473c9e3919f9d2649d4d293570a3
|
refs/heads/master
| 2023-02-02T01:27:39.291834
| 2020-09-22T00:28:56
| 2020-09-22T00:28:56
| 297,161,678
| 0
| 0
|
MIT
| 2020-09-20T20:52:58
| 2020-09-20T20:52:57
| null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
import json
with open("config.json") as config_file:
config = json.load(config_file)
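# Hypothetical config.json shape this pattern is typically paired with
# (the actual keys are defined elsewhere in the bot's codebase):
# {"token": "<bot token>", "prefix": "!"}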
|
[
"cepheac@protonmail.ch"
] |
cepheac@protonmail.ch
|
3a42023dfd9ac8cc3bbee4b8459c832bd62732a1
|
9e38b45f555ffa08fe036b7b0429871ccdd85303
|
/Python/string_split_and_join.py
|
8b17eac33966a7d952c106de079a896dbe6307f7
|
[] |
no_license
|
shayaankhan05/HackerRank
|
b066969b0514046bd8620b55d0458d8284a12005
|
a975fac85af80310ec2ec5f6275c94ceefe3715b
|
refs/heads/master
| 2023-06-01T09:06:23.374474
| 2021-06-24T08:10:38
| 2021-06-24T08:10:38
| 294,485,980
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
def split_and_join(line):
# split on spaces, then rejoin with hyphens
return "-".join(line.split(" "))
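# Example: split_and_join("this is a string") -> "this-is-a-string"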
if __name__ == '__main__':
line = input()
result = split_and_join(line)
print(result)
|
[
"you@example.com"
] |
you@example.com
|
c98a3094d349575fd5198624b1554161571e84e2
|
95c0a5ce24a2d05740772a27d059c7f456eaa431
|
/tests/test_base.py
|
a58436878826f88e4bebd049041315f2a8326e3c
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
enquos/sqlite-fts-python
|
a1b2497a716af3d4d1f611160045474f80e32ba4
|
e4f33a17afcd5f4af592416758004b508784cd96
|
refs/heads/master
| 2021-01-16T18:09:28.122733
| 2017-08-10T06:04:59
| 2017-08-10T06:04:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,224
|
py
|
# coding: utf-8
from __future__ import print_function, unicode_literals
import sqlite3
import struct
import re
from cffi import FFI
import sqlitefts as fts
ffi = FFI()
class SimpleTokenizer(fts.Tokenizer):
_p = re.compile(r'\w+', re.UNICODE)
def tokenize(self, text):
for m in self._p.finditer(text):
s, e = m.span()
t = text[s:e]
l = len(t.encode('utf-8'))
p = len(text[:s].encode('utf-8'))
yield t, p, p + l
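# Example: tokenize('ab cd') yields ('ab', 0, 2) then ('cd', 3, 5); the offsets
# are byte positions in the UTF-8 encoding, which is why len() is taken on the
# encoded text above.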
def test_make_tokenizer():
c = sqlite3.connect(':memory:')
tm = fts.make_tokenizer_module(SimpleTokenizer())
assert all(
getattr(tm, x) is not None
for x in ('iVersion', 'xClose', 'xCreate', 'xDestroy', 'xLanguageid',
'xNext', 'xOpen'))
c.close()
def test_register_tokenizer():
name = 'simple'
c = sqlite3.connect(':memory:')
tokenizer_module = fts.make_tokenizer_module(SimpleTokenizer())
fts.register_tokenizer(c, name, tokenizer_module)
v = c.execute("SELECT FTS3_TOKENIZER(?)", (name, )).fetchone()[0]
assert int(ffi.cast('intptr_t', tokenizer_module)) == \
struct.unpack("P", v)[0]
c.close()
def test_createtable():
c = sqlite3.connect(':memory:')
c.row_factory = sqlite3.Row
name = 'simple'
sql = "CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name)
fts.register_tokenizer(c, name,
fts.make_tokenizer_module(SimpleTokenizer()))
c.execute(sql)
r = c.execute(
"SELECT * FROM sqlite_master WHERE type='table' AND name='fts'").fetchone(
)
assert r
assert r[str('type')] == 'table' and r[str('name')] == 'fts' and r[str(
'tbl_name')] == 'fts'
assert r[str('sql')].upper() == sql.upper()
c.close()
def test_insert():
c = sqlite3.connect(':memory:')
c.row_factory = sqlite3.Row
name = 'simple'
content = 'これは日本語で書かれています'
fts.register_tokenizer(c, name,
fts.make_tokenizer_module(SimpleTokenizer()))
c.execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name))
r = c.execute('INSERT INTO fts VALUES(?)', (content, ))
assert r.rowcount == 1
r = c.execute("SELECT * FROM fts").fetchone()
assert r
assert r[str('content')] == content
c.close()
def test_match():
c = sqlite3.connect(':memory:')
c.row_factory = sqlite3.Row
name = 'simple'
contents = [('abc def', ), ('abc xyz', ), ('あいうえお かきくけこ', ),
('あいうえお らりるれろ', )]
fts.register_tokenizer(c, name,
fts.make_tokenizer_module(SimpleTokenizer()))
c.execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name))
r = c.executemany('INSERT INTO fts VALUES(?)', contents)
assert r.rowcount == 4
r = c.execute("SELECT * FROM fts").fetchall()
assert len(r) == 4
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'abc'").fetchall()
assert len(r) == 2
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'def'").fetchall()
assert len(r) == 1 and r[0][str('content')] == contents[0][0]
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'xyz'").fetchall()
assert len(r) == 1 and r[0][str('content')] == contents[1][0]
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'zzz'").fetchall()
assert len(r) == 0
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'あいうえお'").fetchall()
assert len(r) == 2
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'かきくけこ'").fetchall()
assert len(r) == 1 and r[0][str('content')] == contents[2][0]
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'らりるれろ'").fetchall()
assert len(r) == 1 and r[0][str('content')] == contents[3][0]
r = c.execute("SELECT * FROM fts WHERE fts MATCH 'まみむめも'").fetchall()
assert len(r) == 0
c.close()
def test_full_text_index_queries():
name = 'simple'
docs = [(
'README',
'sqlitefts-python provides binding for tokenizer of SQLite Full-Text search(FTS3/4). It allows you to write tokenizers in Python.'
), ('LICENSE',
'''Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:'''),
('日本語', 'あいうえお かきくけこ さしすせそ たちつてと なにぬねの')]
with sqlite3.connect(':memory:') as c:
c.row_factory = sqlite3.Row
fts.register_tokenizer(c, name,
fts.make_tokenizer_module(SimpleTokenizer()))
c.execute(
"CREATE VIRTUAL TABLE docs USING FTS4(title, body, tokenize={})".format(
name))
c.executemany("INSERT INTO docs(title, body) VALUES(?, ?)", docs)
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'Python'").fetchall(
)
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'bind'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'binding'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'to'").fetchall()
assert len(r) == 2
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'らりるれろ'").fetchall()
assert len(r) == 0
assert (
c.execute(
"SELECT * FROM docs WHERE docs MATCH 'binding'").fetchall()[0]
== c.execute(
"SELECT * FROM docs WHERE body MATCH 'binding'").fetchall()[0])
assert (
c.execute(
"SELECT * FROM docs WHERE body MATCH 'binding'").fetchall()[0]
== c.execute(
"SELECT * FROM docs WHERE docs MATCH 'body:binding'").fetchall(
)[0])
assert (
c.execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお'").fetchall(
)[0] == c.execute(
"SELECT * FROM docs WHERE body MATCH 'あいうえお'").fetchall()[0])
assert (
c.execute("SELECT * FROM docs WHERE body MATCH 'かきくけこ'").fetchall(
)[0] == c.execute(
"SELECT * FROM docs WHERE docs MATCH 'body:かきくけこ'").fetchall()[
0])
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'title:bind'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'title:README'").fetchall()
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'title:日本語'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE title MATCH 'bind'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE title MATCH 'README'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE title MATCH '日本語'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'to in'").fetchall()
assert len(r) == 2
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'Py*'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'Z*'").fetchall()
assert len(r) == 0
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'あ*'").fetchall()
assert len(r) == 1
r = c.execute("SELECT * FROM docs WHERE docs MATCH 'ん*'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'tokenizer SQLite'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH '\"tokenizer SQLite\"'").fetchall(
)
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'あいうえお たちつてと'").fetchall()
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH '\"あいうえお たちつてと\"'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH '\"tok* SQL*\"'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH '\"tok* of SQL*\"'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH '\"あ* さ*\"'").fetchall()
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH '\"あ* かきくけこ さ*\"'").fetchall()
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'tokenizer NEAR SQLite'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'binding NEAR/2 SQLite'").fetchall(
)
assert len(r) == 0
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'binding NEAR/3 SQLite'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'あいうえお NEAR たちつてと'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'あいうえお NEAR/2 たちつてと'").fetchall(
)
assert len(r) == 1
r = c.execute(
"SELECT * FROM docs WHERE docs MATCH 'あいうえお NEAR/3 たちつてと'").fetchall(
)
assert len(r) == 1
def test_tokenizer_output():
name = 'simple'
with sqlite3.connect(':memory:') as c:
fts.register_tokenizer(c, name,
fts.make_tokenizer_module(SimpleTokenizer()))
c.execute("CREATE VIRTUAL TABLE tok1 USING fts3tokenize({})".format(
name))
expect = [("This", 0, 4, 0), ("is", 5, 7, 1), ("a", 8, 9, 2),
("test", 10, 14, 3), ("sentence", 15, 23, 4)]
for a, e in zip(
c.execute("SELECT token, start, end, position "
"FROM tok1 WHERE input='This is a test sentence.'"),
expect):
assert e == a
s = 'これ は テスト の 文 です'
expect = [(None, 0, -1, 0)]
for i, t in enumerate(s.split()):
expect.append((t, expect[-1][2] + 1,
expect[-1][2] + 1 + len(t.encode('utf-8')), i))
expect = expect[1:]
for a, e in zip(
c.execute("SELECT token, start, end, position "
"FROM tok1 WHERE input=?", [s]), expect):
assert e == a
|
[
"mymelo@gmail.com"
] |
mymelo@gmail.com
|
2bd0fce8ed3b15c82198fc4e0f624e8e38e89983
|
60c0570b94d27e1671c757414bf20de20abf2c67
|
/backend/backend/profiles/urls.py
|
12e27a01e95b716cd47168f014bc5d15e4eb4703
|
[
"MIT"
] |
permissive
|
mightykim91/howaboutme
|
19f4b2ef7a809e4bc47e482cabf4d2f85808f15a
|
467c3a2eccc959084296bc7f4679e77b93b9d7f7
|
refs/heads/master
| 2023-01-20T00:04:28.844903
| 2020-11-26T11:20:50
| 2020-11-26T11:20:50
| 316,052,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from django.urls import path
from . import views
app_name = 'profiles'
urlpatterns = [
path('', views.ProfileView.as_view()),
path('<int:user_id>/', views.get_profile),
path('partners/', views.get_partners),
]
|
[
"rsm4282@naver.com"
] |
rsm4282@naver.com
|
8a6c0fa302cae9c1570396f3d292093bc32abbb5
|
7d61a3451ed9b79840ec7d9563312bfc23b66ec3
|
/train.py
|
0d08961b591d54d5104e80651cd7626eed0a4eb9
|
[
"MIT"
] |
permissive
|
champon1020/ViViT-pytorch
|
4f7fef6027a96e4be4c486a98df780b5bfcccf9f
|
9bb3ea8746360e2031fed15fd7b0fcde132725ba
|
refs/heads/master
| 2023-04-10T14:08:57.486027
| 2021-04-26T08:13:13
| 2021-04-26T08:13:13
| 356,957,946
| 0
| 0
|
MIT
| 2021-04-21T20:38:44
| 2021-04-11T19:16:34
|
Python
|
UTF-8
|
Python
| false
| false
| 4,200
|
py
|
import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from ignite.engine import Engine, Events
from ignite.metrics import Accuracy, Loss
from timesformer_pytorch import TimeSformer
from torch.utils.data import DataLoader
import wandb
from config import Config
from ucf101 import UCF101
from vivit import ViViT
wandb_online = True
cfg = Config()
"""
model = ViViT(
dim=512,
image_size=cfg.image_size,
patch_size=16,
num_classes=101,
num_frames=cfg.n_frames,
depth=12,
heads=4,
pool="cls",
in_channels=3,
dim_head=64,
dropout=0.1,
).cuda()
"""
model = TimeSformer(
dim=512,
image_size=cfg.image_size,
patch_size=16,
num_frames=cfg.n_frames,
num_classes=101,
depth=12,
heads=8,
dim_head=64,
attn_dropout=0.1,
ff_dropout=0.1,
).cuda()
model = nn.DataParallel(model)
optimizer = torch.optim.SGD(
model.parameters(),
lr=cfg.base_lr,
momentum=0.9,
nesterov=True,
weight_decay=1e-3,
)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, cfg.multistep_milestones)
ce_loss_fn = nn.CrossEntropyLoss()
def onehot_label(class_num: torch.Tensor):
return F.one_hot(class_num, num_classes=101)
print(sum(p.numel() for p in model.parameters()))
def train_step(engine, batch):
# return 0 # debug
model.train()
optimizer.zero_grad()
video, class_num = batch["video"].cuda(), batch["class"].cuda()
pred = model(video)
pred = F.softmax(pred, dim=1)
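    # NOTE (editorial): nn.CrossEntropyLoss applies log-softmax internally, so
    # feeding it softmax-ed outputs is a common pitfall that weakens gradients;
    # passing the raw logits is the usual fix. The original behavior is kept
    # unchanged here.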
loss = ce_loss_fn(pred, class_num)
# print(torch.argmax(pred, dim=1), class_num)
loss.backward()
optimizer.step()
scheduler.step()
# torch.cuda.empty_cache()
return loss.item()
trainer = Engine(train_step)
def validation_step(engine, batch):
# return torch.rand(16, 101), torch.zeros(16).long() # debug
model.eval()
with torch.no_grad():
video, class_num = batch["video"].cuda(), batch["class"].cuda()
pred = model(video)
pred = F.softmax(pred, dim=1)
# torch.cuda.empty_cache()
return pred, class_num
evaluator = Engine(validation_step)
accuracy_metric = Accuracy()
accuracy_metric.attach(evaluator, "accuracy")
ce_loss_metric = Loss(ce_loss_fn)
ce_loss_metric.attach(evaluator, "loss")
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
e = engine.state.epoch
i = engine.state.iteration
loss = engine.state.output
print(f"Epoch: {e} / {cfg.epochs} : {i} - Loss: {loss:.5f}")
# if wandb_online:
# wandb.log({"loss": loss})
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
state = evaluator.run(train_loader)
metrics = state.metrics
loss = metrics["loss"]
accuracy = metrics["accuracy"]
e = engine.state.epoch
print(f"Training Results - Loss: {loss:.5f}, Avg accuracy: {accuracy:.5f}")
if wandb_online:
wandb.log({"train_loss": loss, "train_accuracy": accuracy})
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
state = evaluator.run(val_loader)
metrics = state.metrics
loss = metrics["loss"]
accuracy = metrics["accuracy"]
print(f"Valiation Results - Loss: {loss:.5f}, Avg accuracy: {accuracy:.5f}")
if wandb_online:
wandb.log({"validation_loss": loss, "validation_accuracy": accuracy})
if wandb_online:
wandb.init(project="vivit", name=f"vivit-{datetime.datetime.now()}")
train_loader = DataLoader(
UCF101(
"./dataset/UCF101",
"./dataset/ucfTrainTestlist/classInd.txt",
[
f"./dataset/ucfTrainTestlist/trainlist01.txt",
# f"./dataset/ucfTrainTestlist/trainlist02.txt",
],
cfg.n_frames,
cfg.image_size,
),
cfg.batch_size,
shuffle=True,
)
val_loader = DataLoader(
UCF101(
"./dataset/UCF101",
"./dataset/ucfTrainTestlist/classInd.txt",
[f"./dataset/ucfTrainTestlist/trainlist03.txt"],
cfg.n_frames,
cfg.image_size,
),
cfg.batch_size,
shuffle=False,
)
trainer.run(train_loader, max_epochs=cfg.epochs)
torch.save(model, f"./checkpoints/ckpt-{datetime.datetime.now()}.pt")
|
[
"nagatelu1020@gmail.com"
] |
nagatelu1020@gmail.com
|
088973a0dbd18923e03afc75a341c75a61a348e9
|
cb80ebc49bc92c350f6d6f039a6a4f0efa6b4c60
|
/EnvironmentVariables/EnvironmentVariables.py
|
9e6a9451d078f7f1d2002410d1982b10be2b1a30
|
[] |
no_license
|
rabramley/pythonTrials
|
9708ef1b39011c8c08909808132114ff3b30d34a
|
bbc93a9f69afbe3cd045de5835ad3c8a4a557050
|
refs/heads/master
| 2021-01-15T23:07:48.074817
| 2015-06-22T14:11:20
| 2015-06-22T14:11:20
| 32,924,481
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
#!/usr/bin/env python
import os
print(os.environ['HOME'])
# using get will return `None` if a key is not present rather than raise a `KeyError`
print(os.environ.get('KEY_THAT_MIGHT_EXIST'))
default_value = 'Use this instead'
# os.getenv is equivalent, and can also give a default value instead of `None`
print(os.getenv('KEY_THAT_MIGHT_EXIST', default_value))
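# A minimal usage sketch (the variable name is hypothetical and assumed unset
# beforehand; environment values must be strings):
# os.environ['MY_APP_MODE'] = 'debug'
# print(os.getenv('MY_APP_MODE', 'production'))  # -> debug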
|
[
"rabramley@gmail.com"
] |
rabramley@gmail.com
|
5965cf6c2653d46dfb79c779094bab865c550470
|
115731ab247a84b25f1e4dce200df62500e67bee
|
/Figures/Paretofront/Analyses_Pareto/JRFs/Paretofront_JRF_Knee_Force.py
|
e82dba7fd9b332606f142fc77c5230f98df3b705
|
[
"Apache-2.0"
] |
permissive
|
AliKMBonab/Simulation-Based-Multi-criteria-Comparison-of-Monoarticular-and-Biarticular-Exoskeletons
|
418d8c7933e965ba04ca54edc88b7962e32ff674
|
6766608301b4ac575f7851c1a62c450edb0e0fbb
|
refs/heads/master
| 2023-05-07T12:19:33.526911
| 2020-09-24T09:13:22
| 2020-09-24T09:13:22
| 292,260,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,699
|
py
|
import collections
import copy
import os
import re
import csv
import enum
import glob
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pylab as pl
from scipy.signal import butter, filtfilt
import importlib
from tabulate import tabulate
from numpy import nanmean, nanstd
from perimysium import postprocessing as pp
from perimysium import dataman
import pathlib
import seaborn as sns
import Utils as utils
from Colors import colors as mycolors
#####################################################################################
subjects = ['05','07','09','10','11','12','14']
trials_num = ['01','02','03']
gait_cycle = np.linspace(0,100,1000)
#####################################################################################
# Reading CSV files into a dictionary and constructing gls
jrf_dataset = utils.csv2numpy('./Data/RRA/jrf_final_data.csv')
ideal_jrf_dataset = utils.csv2numpy('./Data/Ideal/jrf_ideal_exo_data.csv')
# pareto exo torque dataset
directory = './Data/Pareto/*_reaction_moments.csv'
files = enumerate(glob.iglob(directory), 1)
pareto_jrf_dataset = {pathlib.PurePath(f[1]).stem: np.loadtxt(f[1], delimiter=',') for f in files}
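# each entry maps a CSV file's path stem (e.g.
# 'biarticular_paretofront_noload_reaction_moments') to the array loaded from it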
bi_noload_RM_dictionary = utils.clasify_data(pareto_jrf_dataset['biarticular_paretofront_noload_reaction_moments'],loadcondition='noload',pareto=True,device='biarticular')
bi_loaded_RM_dictionary = utils.clasify_data(pareto_jrf_dataset['biarticular_paretofront_loaded_reaction_moments'],loadcondition='loaded',pareto=True,device='biarticular')
mono_noload_RM_dictionary = utils.clasify_data(pareto_jrf_dataset['monoarticular_paretofront_noload_reaction_moments'],loadcondition='noload',pareto=True,device='monoarticular')
mono_loaded_RM_dictionary = utils.clasify_data(pareto_jrf_dataset['monoarticular_paretofront_loaded_reaction_moments'],loadcondition='loaded',pareto=True,device='monoarticular')
# pareto exo force dataset
directory = './Data/Pareto/*_reaction_forces.csv'
files = enumerate(glob.iglob(directory), 1)
pareto_jrf_dataset = {pathlib.PurePath(f[1]).stem: np.loadtxt(f[1], delimiter=',') for f in files}
bi_noload_RF_dictionary = utils.clasify_data(pareto_jrf_dataset['biarticular_paretofront_noload_reaction_forces'],loadcondition='noload',pareto=True,device='biarticular',forces_name=['Fx','Fy','Fz'])
bi_loaded_RF_dictionary = utils.clasify_data(pareto_jrf_dataset['biarticular_paretofront_loaded_reaction_forces'],loadcondition='loaded',pareto=True,device='biarticular',forces_name=['Fx','Fy','Fz'])
mono_noload_RF_dictionary = utils.clasify_data(pareto_jrf_dataset['monoarticular_paretofront_noload_reaction_forces'],loadcondition='noload',pareto=True,device='monoarticular',forces_name=['Fx','Fy','Fz'])
mono_loaded_RF_dictionary = utils.clasify_data(pareto_jrf_dataset['monoarticular_paretofront_loaded_reaction_forces'],loadcondition='loaded',pareto=True,device='monoarticular',forces_name=['Fx','Fy','Fz'])
# gls
gl_noload = {'noload_subject{}_trial{}'.format(i,j): utils.construct_gl_mass_side(subjectno=i,trialno=j,loadcond='noload') for i in subjects for j in trials_num}
gl_loaded = {'loaded_subject{}_trial{}'.format(i,j): utils.construct_gl_mass_side(subjectno=i,trialno=j,loadcond='loaded') for i in subjects for j in trials_num}
#####################################################################################
# Processing Data
# toe-off
noload_mean_toe_off,_,loaded_mean_toe_off,_= utils.toe_off_avg_std(gl_noload,gl_loaded)
# indices
bi_loaded_indices = np.array([25,24,23,22,21,17,16,13,12,11,6,1])
mono_loaded_indices = np.array([25,20,15,10,5,4,3,2,1])
bi_noload_indices = np.array([25,24,23,22,21,19,18,17,13,12,11,1])
mono_noload_indices = np.array([25,20,15,14,13,8,7,6,2,1])
#******************************
# knee joint Fx
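# NOTE (editorial): gl_noload is passed to every pareto_profiles_avg_std call
# below, including the *_loaded_* computations; if that is unintentional,
# gl_loaded may be the intended argument there (left unchanged here).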
# bi
mean_bi_loaded_knee_joint_jrf_Fx, std_bi_loaded_knee_joint_jrf_Fx = utils.pareto_profiles_avg_std(bi_loaded_RF_dictionary['knee_joint_Fx'],gl_noload,simulation_num=len(bi_loaded_indices),change_direction=False)
mean_bi_noload_knee_joint_jrf_Fx, std_bi_noload_knee_joint_jrf_Fx = utils.pareto_profiles_avg_std(bi_noload_RF_dictionary['knee_joint_Fx'],gl_noload,simulation_num=len(bi_noload_indices),change_direction=False)
# mono
mean_mono_loaded_knee_joint_jrf_Fx, std_mono_loaded_knee_joint_jrf_Fx = utils.pareto_profiles_avg_std(mono_loaded_RF_dictionary['knee_joint_Fx'],gl_noload,simulation_num=len(mono_loaded_indices),change_direction=False)
mean_mono_noload_knee_joint_jrf_Fx, std_mono_noload_knee_joint_jrf_Fx = utils.pareto_profiles_avg_std(mono_noload_RF_dictionary['knee_joint_Fx'],gl_noload,simulation_num=len(mono_noload_indices),change_direction=False)
# knee joint Fy
# bi
mean_bi_loaded_knee_joint_jrf_Fy, std_bi_loaded_knee_joint_jrf_Fy = utils.pareto_profiles_avg_std(bi_loaded_RF_dictionary['knee_joint_Fy'],gl_noload,simulation_num=len(bi_loaded_indices),change_direction=False)
mean_bi_noload_knee_joint_jrf_Fy, std_bi_noload_knee_joint_jrf_Fy = utils.pareto_profiles_avg_std(bi_noload_RF_dictionary['knee_joint_Fy'],gl_noload,simulation_num=len(bi_noload_indices),change_direction=False)
# mono
mean_mono_loaded_knee_joint_jrf_Fy, std_mono_loaded_knee_joint_jrf_Fy = utils.pareto_profiles_avg_std(mono_loaded_RF_dictionary['knee_joint_Fy'],gl_noload,simulation_num=len(mono_loaded_indices),change_direction=False)
mean_mono_noload_knee_joint_jrf_Fy, std_mono_noload_knee_joint_jrf_Fy = utils.pareto_profiles_avg_std(mono_noload_RF_dictionary['knee_joint_Fy'],gl_noload,simulation_num=len(mono_noload_indices),change_direction=False)
# knee joint Fz
# bi
mean_bi_loaded_knee_joint_jrf_Fz, std_bi_loaded_knee_joint_jrf_Fz = utils.pareto_profiles_avg_std(bi_loaded_RF_dictionary['knee_joint_Fz'],gl_noload,simulation_num=len(bi_loaded_indices),change_direction=False)
mean_bi_noload_knee_joint_jrf_Fz, std_bi_noload_knee_joint_jrf_Fz = utils.pareto_profiles_avg_std(bi_noload_RF_dictionary['knee_joint_Fz'],gl_noload,simulation_num=len(bi_noload_indices),change_direction=False)
# mono
mean_mono_loaded_knee_joint_jrf_Fz, std_mono_loaded_knee_joint_jrf_Fz = utils.pareto_profiles_avg_std(mono_loaded_RF_dictionary['knee_joint_Fz'],gl_noload,simulation_num=len(mono_loaded_indices),change_direction=False)
mean_mono_noload_knee_joint_jrf_Fz, std_mono_noload_knee_joint_jrf_Fz = utils.pareto_profiles_avg_std(mono_noload_RF_dictionary['knee_joint_Fz'],gl_noload,simulation_num=len(mono_noload_indices),change_direction=False)
#####################################################################################
# profile plots
#************************************************************************************
# force profile
fig, axes = plt.subplots(nrows=4,ncols=3,num='Pareto Curve: loaded mono vs bi',figsize=(12.6, 14.8))
# biarticular loaded Fx
plot_dic = {'data':utils.smooth(mean_bi_loaded_knee_joint_jrf_Fx,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_loaded_kneejoint_RFx'],5),'joint_color':'k',
'avg_toeoff':loaded_mean_toe_off,'indices':bi_loaded_indices,'title':'knee Fx,\n loaded biarticular',
'ideal_data':ideal_jrf_dataset['mean_bi_loaded_knee_RFx'],'ideal_color':mycolors['crimson red']}
ax = plt.subplot(4,3,1)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=False,
toeoff_color='k',add_ideal_profile=True,ylabel='force (N/kg)')
ax.set_yticks([-45,-30,-15,0,15])
ax.set_ylim((-45,15))
# biarticular loaded Fy
plot_dic = {'data':utils.smooth(mean_bi_loaded_knee_joint_jrf_Fy,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_loaded_kneejoint_RFy'],5),'joint_color':'k',
'avg_toeoff':loaded_mean_toe_off,'indices':bi_loaded_indices,'title':'knee Fy,\n loaded biarticular',
'ideal_data':ideal_jrf_dataset['mean_bi_loaded_knee_RFy'],'ideal_color':mycolors['crimson red']}
ax = plt.subplot(4,3,2)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=False,
toeoff_color='k',add_ideal_profile=True)
ax.set_yticks([-60,-45,-30,-15,0])
ax.set_ylim((-60,0))
# biarticular loaded Fz
plot_dic = {'data':utils.smooth(mean_bi_loaded_knee_joint_jrf_Fz,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_loaded_kneejoint_RFz'],5),'joint_color':'k',
'avg_toeoff':loaded_mean_toe_off,'indices':bi_loaded_indices,'title':'knee Fz,\n loaded biarticular',
'ideal_data':ideal_jrf_dataset['mean_bi_loaded_knee_RFz'],'ideal_color':mycolors['crimson red']}
ax = plt.subplot(4,3,3)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=True,adjust_axes=True,
toeoff_color='k',add_ideal_profile=True)
ax.set_yticks([-8,-6,-4,-2,0])
ax.set_ylim((-8,1))
#****************************************
#****************************************
# monoarticular loaded Fx
plot_dic = {'data':utils.smooth(mean_mono_loaded_knee_joint_jrf_Fx,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_loaded_kneejoint_RFx'],5),'joint_color':'k',
'avg_toeoff':loaded_mean_toe_off,'indices':mono_loaded_indices,'title':'knee Fx,\n loaded monoarticular',
'ideal_data':ideal_jrf_dataset['mean_mono_loaded_knee_RFx'],'ideal_color':mycolors['crimson red']}
ax = plt.subplot(4,3,4)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=False,
toeoff_color='k',add_ideal_profile=True,ylabel='force (N/kg)')
ax.set_yticks([-45,-30,-15,0,15])
ax.set_ylim((-45,15))
# monoarticular loaded Fy
plot_dic = {'data':utils.smooth(mean_mono_loaded_knee_joint_jrf_Fy,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_loaded_kneejoint_RFy'],5),'joint_color':'k',
'avg_toeoff':loaded_mean_toe_off,'indices':mono_loaded_indices,'title':'knee Fy,\n loaded monoarticular',
'ideal_data':ideal_jrf_dataset['mean_mono_loaded_knee_RFy'],'ideal_color':mycolors['crimson red']}
ax = plt.subplot(4,3,5)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=False,
toeoff_color='k',add_ideal_profile=True)
ax.set_yticks([-60,-45,-30,-15,0])
ax.set_ylim((-60,0))
# monoarticular loaded Fz
plot_dic = {'data':utils.smooth(mean_mono_loaded_knee_joint_jrf_Fz,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_loaded_kneejoint_RFz'],5),'joint_color':'k',
'avg_toeoff':loaded_mean_toe_off,'indices':mono_loaded_indices,'title':'knee Fz,\n loaded monoarticular',
'ideal_data':ideal_jrf_dataset['mean_mono_loaded_knee_RFz'],'ideal_color':mycolors['crimson red']}
ax = plt.subplot(4,3,6)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=True,adjust_axes=True,
toeoff_color='k',add_ideal_profile=True)
ax.set_yticks([-8,-6,-4,-2,0])
ax.set_ylim((-8,1))
#****************************************
#****************************************
# biarticular noload Fx
plot_dic = {'data':utils.smooth(mean_bi_noload_knee_joint_jrf_Fx,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_noload_kneejoint_RFx'],5),'joint_color':'xkcd:shamrock green',
'avg_toeoff':noload_mean_toe_off,'indices':bi_noload_indices,'title':'knee Fx,\n noload biarticular',
'ideal_data':ideal_jrf_dataset['mean_bi_noload_knee_RFx'],'ideal_color':mycolors['french rose']}
ax = plt.subplot(4,3,7)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=False,
toeoff_color='k',add_ideal_profile=True,ylabel='force (N/kg)')
ax.set_yticks([-45,-30,-15,0,15])
ax.set_ylim((-45,15))
# biarticular noload Fy
plot_dic = {'data':utils.smooth(mean_bi_noload_knee_joint_jrf_Fy,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_noload_kneejoint_RFy'],5),'joint_color':'xkcd:shamrock green',
'avg_toeoff':noload_mean_toe_off,'indices':bi_noload_indices,'title':'knee Fy,\n noload biarticular',
'ideal_data':ideal_jrf_dataset['mean_bi_noload_knee_RFy'],'ideal_color':mycolors['french rose']}
ax = plt.subplot(4,3,8)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=False,
toeoff_color='k',add_ideal_profile=True)
ax.set_yticks([-60,-45,-30,-15,0])
ax.set_ylim((-60,0))
# biarticular noload Fz
plot_dic = {'data':utils.smooth(mean_bi_noload_knee_joint_jrf_Fz,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_noload_kneejoint_RFz'],5),'joint_color':'xkcd:shamrock green',
'avg_toeoff':noload_mean_toe_off,'indices':bi_noload_indices,'title':'knee Fz,\n noload biarticular',
'ideal_data':ideal_jrf_dataset['mean_bi_noload_knee_RFz'],'ideal_color':mycolors['french rose']}
ax = plt.subplot(4,3,9)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=True,adjust_axes=True,
toeoff_color='k',add_ideal_profile=True)
ax.set_yticks([-8,-6,-4,-2,0])
ax.set_ylim((-8,1))
#****************************************
#****************************************
# monoarticular noload Fx
plot_dic = {'data':utils.smooth(mean_mono_noload_knee_joint_jrf_Fx,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_noload_kneejoint_RFx'],5),'joint_color':'xkcd:shamrock green',
'avg_toeoff':noload_mean_toe_off,'indices':mono_noload_indices,'title':'knee Fx,\n noload monoarticular',
'ideal_data':ideal_jrf_dataset['mean_mono_noload_knee_RFx'],'ideal_color':mycolors['french rose']}
ax = plt.subplot(4,3,10)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=False,
toeoff_color='k',add_ideal_profile=True,ylabel='force (N/kg)')
ax.set_yticks([-45,-30,-15,0,15])
ax.set_ylim((-45,15))
# monoarticular noload Fy
plot_dic = {'data':utils.smooth(mean_mono_noload_knee_joint_jrf_Fy,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_noload_kneejoint_RFy'],5),'joint_color':'xkcd:shamrock green',
'avg_toeoff':noload_mean_toe_off,'indices':mono_noload_indices,'title':'knee Fy,\n noload monoarticular',
'ideal_data':ideal_jrf_dataset['mean_mono_noload_knee_RFy'],'ideal_color':mycolors['french rose']}
ax = plt.subplot(4,3,11)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=False,
toeoff_color='k',add_ideal_profile=True)
ax.set_yticks([-60,-45,-30,-15,0])
ax.set_ylim((-60,0))
# monoarticular noload Fz
plot_dic = {'data':utils.smooth(mean_mono_noload_knee_joint_jrf_Fz,5,multidim=True),
'joint_data':utils.smooth(jrf_dataset['mean_noload_kneejoint_RFz'],5),'joint_color':'xkcd:shamrock green',
'avg_toeoff':noload_mean_toe_off,'indices':mono_noload_indices,'title':'knee Fz,\n noload monoarticular',
'ideal_data':ideal_jrf_dataset['mean_mono_noload_knee_RFz'],'ideal_color':mycolors['french rose']}
ax = plt.subplot(4,3,12)
utils.plot_paretofront_profile_changes(plot_dic,colormap='tab20',include_colorbar=True,adjust_axes=True,
toeoff_color='k',add_ideal_profile=True)
ax.set_yticks([-8,-6,-4,-2,0])
ax.set_ylim((-8,1))
fig.tight_layout(h_pad=-1.5, w_pad=-1.5)
fig.subplots_adjust(top=0.98, bottom=0.075, left=0.05, right=0.95,hspace=0.45,wspace=0.05)
fig.savefig('./Figures/Paretofront/Analyses_Pareto/JRFs/Paretofront_JRF_Knee_Force.pdf',orientation='landscape',bbox_inches='tight')
plt.show()
|
[
"alik@sabanciuniv.local"
] |
alik@sabanciuniv.local
|
2c6a2672603f19ab8dc70bb4d8be325ea3ba9772
|
505ca3057976e988a8430742a9a1141b696280f7
|
/bot.py
|
c35b9abde13cb3d86eb5ba12ce34c3c2c2ccc158
|
[] |
no_license
|
ibuckshot5/New-Version-Forced
|
32455c9d7cbaf0f4da15db2d9846274b7c813d3e
|
d383f74dbb11234c979321f92564183bee43ee08
|
refs/heads/master
| 2021-01-23T08:34:48.781231
| 2017-09-05T23:32:02
| 2017-09-05T23:32:02
| 102,539,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,086
|
py
|
import configargparse
import requests
import logging
import sys
from time import sleep
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] [%(levelname)5s] %(message)s')
log = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
def main():
parser = configargparse.ArgumentParser()
parser.add_argument('-px', '--proxy', help='Proxy to use when version force-checking.')
parser.add_argument('-wh', '--webhook', help='Discord webhook to push to once a new version is forced.')
parser.add_argument('-dv', '--default-version', help='Version to default to, skipping initial check.')
parser.add_argument('-dm', '--discord-message', help='Discord message to send once version forced. '
'See wiki for details.')
parser.add_argument('-cd', '--check-delay', default=300, help='Check every X seconds')
args = parser.parse_args()
    if args.proxy is None:
log.error('Proxy URL not found, exiting.')
sys.exit(1)
version = args.default_version
    if args.default_version is None:
log.info('Running initial check, to determine version...')
r = requests.get('https://pgorelease.nianticlabs.com/plfe/version', proxies={
'http': args.proxy
})
version = r.text.replace('\n\x06', '')
while True:
log.info('Running version check...')
r = requests.get('https://pgorelease.nianticlabs.com/plfe/version', proxies={
'http': args.proxy
}).text.replace('\n\x06', '')
        if r != version:
            msg = create_discord_message(version, r, args.discord_message)
            requests.post(args.webhook, data={
                'content': msg
            })
            log.warning('{} is being forced! Sending alert.'.format(r))
            version = r  # remember the new version so the alert fires only once
sleep(int(args.check_delay))
def create_discord_message(old_ver, new_ver, template):
return template.replace('<old>', old_ver).replace('<new>', new_ver)
if __name__ == '__main__':
main()
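# Hypothetical invocation (argument values are placeholders, not real endpoints):
#   python bot.py --proxy http://127.0.0.1:8080 \
#       --webhook https://discord.com/api/webhooks/<id>/<token> \
#       --discord-message 'Forced version changed: <old> -> <new>'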
|
[
"noreply@github.com"
] |
noreply@github.com
|
f6e52ffb6985d2989246c90d72b42b70bbc332b0
|
7401aec7b85d358c1d05d9526c675e69bde0152b
|
/script/srcOverlapScore/pocketgeneral/mainApoHolo.py
|
adba364d64c2208938a82935d0b23107c369c69f
|
[] |
no_license
|
akramhecini/stage_HECINI
|
f2a1b43601098e3d3358962ead042634f7d77792
|
164f01bb047b6a8c7e2a42f02f5481861ccef808
|
refs/heads/master
| 2020-05-01T17:47:22.244932
| 2019-03-15T18:31:48
| 2019-03-15T18:31:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,556
|
py
|
"""
BORREL Alexandre
09-2012
"""
import globalFonction
import runOtherProg
import writeFiles
import pathDirectory
import tool
import analysis
import superposeStructure
from os import listdir, path, remove
from re import search
def main (path_file_model, name_dataset="ApoForm"):
dico_dataset = globalFonction.calculDatasetDictionary(name_dataset,1)
# print len(dico_dataset.keys())
path_file_correspondace = writeFiles.corespondanceApoHolo (dico_dataset, pathDirectory.result(name_dataset) + "correspondencePDB")
# divise dataset
dico_dataset_type_apo = tool.selectOnlyTypeStructure(dico_dataset, "apo structure")
dico_dataset_type_holo = tool.selectOnlyTypeStructure(dico_dataset, "holo structure")
    print(len(dico_dataset_type_apo))
    print(dico_dataset_type_holo[list(dico_dataset_type_holo.keys())[1]])
# accessibility solvent
runOtherProg.globalNACCESS(dico_dataset_type_apo, name_dataset)
runOtherProg.globalNACCESS(dico_dataset_type_holo, name_dataset)
# superimpose -> retrieve matrix transloc
superposeStructure.superposeApoHolo(dico_dataset, name_dataset)
######################
# Pocket Estimation #
######################
# pocket estimation holo
globalFonction.pocketEstimation(name_dataset, dico_dataset_type_holo, "Fpocket", runFpocket = 1)
# pocket estimation apo
globalFonction.pocketEstimationApoForm(name_dataset, dico_dataset_type_apo, dico_dataset_type_holo, runFpocket = 1)
##############
# Descriptor #
##############
# descriptor
dico_descriptors_type_apo = globalFonction.retrieveGlobalDescriptors ("Fpocket", "none", dico_dataset_type_apo, name_dataset, write_file = 0, calcul_descriptor = 1, option_separate = 1)
dico_descriptors_type_holo = globalFonction.retrieveGlobalDescriptors ("Fpocket", "none", dico_dataset_type_holo, name_dataset, write_file = 0, calcul_descriptor = 1, option_separate = 1)
    print(dico_descriptors_type_holo)
#####################
# write data global #
#####################
path_file_descriptor_apo = writeFiles.globalDescriptors(dico_descriptors_type_apo, pathDirectory.result(name_dataset) + "apo_all_pocket.data")
path_file_descriptor_holo = writeFiles.globalDescriptors(dico_descriptors_type_holo, pathDirectory.result(name_dataset) + "holo_all_pocket.data")
path_file_RMSD = pathDirectory.searchRMSDFile (pathDirectory.descriptor(name_dataset))
path_dir_result = pathDirectory.result(name_dataset)
# color for ACP
writeFiles.colorACPFile (dico_descriptors_type_apo)
###############
# ACP #
###############
### # apo protein
analysis.specificACP("global", dico_descriptors_type_apo, path_dir_result + "apo_globalData", mainACP = "All_descriptors_Apo")
analysis.specificACP(["hydrophobic_kyte","p_Ooh_atom", "p_aromatic_residues"], dico_descriptors_type_apo, path_dir_result + "apo_descModel", mainACP = "Specific_descriptors")
### # holo protein
# analysis.specificACP("global", dico_descriptors_type_holo, path_dir_result + "holo_globalData", mainACP = "All_descriptors_holo")
# analysis.specificACP(["hydrophobic_kyte","p_Ooh_atom", "p_aromatic_residues"], dico_descriptors_type_holo, path_dir_result + "holo_descModel", mainACP = "Specific_descriptors")
### # PCA apo and holo same plot
# runOtherProg.ACPDataset (path_file_descriptor_apo, path_file_descriptor_holo, path_dir_result + "PCA_apo_holo")
analysis.ACPTwoDatasetDescriptor(dico_descriptors_type_apo, dico_descriptors_type_holo, ["hydrophobic_kyte","p_Ooh_atom", "p_aromatic_residues"], path_dir_result + "desc_model", correspondance_file=path_file_correspondace)
analysis.ACPTwoDatasetDescriptor(dico_descriptors_type_apo, dico_descriptors_type_holo, "radi", path_dir_result+ "radi", correspondance_file=path_file_correspondace)
analysis.ACPTwoDatasetDescriptor(dico_descriptors_type_apo, dico_descriptors_type_holo,["RADIUS_HULL", "DIAMETER_HULL", "SURFACE_HULL", "VOLUME_HULL", "SMALLEST_SIZE", "INERTIA_3", "INERTIA_1", "FACE", "PCI", "PSI", "RADIUS_CYLINDER", "X._ATOM_CONVEXE", "CONVEX.SHAPE_COEFFICIENT", "INERTIA_2", "C_RESIDUES", "C_ATOM"], path_dir_result+ "geoOnly", correspondance_file=path_file_correspondace)
#
################
# RMSD pocket #
################
analysis.RMSDPockets (dico_dataset, pathDirectory.result(name_dataset + "/RMSD"), name_dataset)
########################
# Histogram descriptor #
########################
# analysis.histogramFonctionRMSD (dico_descriptors_type_apo, dico_descriptors_type_holo, path_file_RMSD, path_dir_result, ["hydrophobic_kyte"])
# analysis.histogramFonctionRMSD (dico_descriptors_type_apo, dico_descriptors_type_holo, path_file_RMSD, path_dir_result, ["p_Ooh_atom"])
# analysis.histogramFonctionRMSD (dico_descriptors_type_apo, dico_descriptors_type_holo, path_file_RMSD, path_dir_result, ["p_aromatic_residues"])
###################
# apply LDA model #
###################
# # global
# # apo
    globalFonction.applyModel(path_file_model, path_file_descriptor_apo, path_dir_result + "predictApo.result") # keep only one command in the function for later, since it will most likely be PCA or something else
#
# # holo
globalFonction.applyModel(path_file_model, path_file_descriptor_holo, path_dir_result + "predictHolo.result")
#by type holo structure
def multiModelTest (path_dir_model, name_file_result = "", name_dataset="ApoForm"):
f = 0
dico_dataset = globalFonction.calculDatasetDictionary(name_dataset,0)
if name_dataset == "ApoForm138" :
name_dataset = "ApoForm"
f = 1
path_file_correspondace = writeFiles.corespondanceApoHolo (dico_dataset, pathDirectory.result(name_dataset) + "correspondencePDB")
# divise dataset
dico_dataset_type_apo = tool.selectOnlyTypeStructure(dico_dataset, "apo structure")
dico_dataset_type_holo = tool.selectOnlyTypeStructure(dico_dataset, "holo structure")
# dictionnary with descriptors
dico_descriptors_type_apo = globalFonction.retrieveGlobalDescriptors ("Fpocket", "none", dico_dataset_type_apo, name_dataset, write_file = 0, calcul_descriptor = 0, option_separate = 1)
dico_descriptors_type_holo = globalFonction.retrieveGlobalDescriptors ("Fpocket", "none", dico_dataset_type_holo, name_dataset, write_file = 0, calcul_descriptor = 0, option_separate = 1)
if f == 1 :
name_dataset = "ApoForm138"
path_dir_result = pathDirectory.result(name_dataset)
#####################
# write data global #
#####################
path_file_descriptor_apo = writeFiles.globalDescriptors(dico_descriptors_type_apo, pathDirectory.result(name_dataset) + "apo_all_pocket.data")
path_file_descriptor_holo = writeFiles.globalDescriptors(dico_descriptors_type_holo, pathDirectory.result(name_dataset) + "holo_all_pocket.data")
l_file_model = listdir(path_dir_model)
p_file_result = path_dir_result + "best" + str (name_file_result) + ".result"
# check exist ?
if path.exists(p_file_result) :
remove(p_file_result)
for p_file_model in l_file_model :
if search("Rdata", p_file_model) :
runOtherProg.predictLDA (path_dir_model + p_file_model, path_file_descriptor_apo, p_file_result, plot = 0)
# Global Schmitke apo dataset -> redo the encoding function      #
#################################################################
# main ("/home/borrel/druggabilityProject/result/krasowski/Fpocket/LDA/AutoSelected/autoselected.Rdata", name_dataset="ApoForm")
# main ("/home/borrel/druggabilityProject/result/krasowski/Fpocket/LDA/AutoSelected/autoselected.Rdata", name_dataset="ApoForm138")
# Dataset Apo Schmitke clean -> only one apo by holo
# main ("/home/borrel/druggabilityProject/result/krasowski/Fpocket/LDA/AutoSelected/autoselected.Rdata", name_dataset="ApoFormClean")
#####main ("/home/borrel/druggabilityProject/result/krasowski/Fpocket/LDA/hydroAro/hydroAro.Rdata", name_dataset="ApoFormClean")
# apo huang
# main ("/home/borrel/druggabilityProject/result/krasowski/Fpocket/LDA/AutoSelected/autoselected.Rdata", name_dataset = "ApoHuang")
##############################################
# test best models proposed with selection #
##############################################
# -POE- #
# multiModelTest ("/home/borrel/druggabilityProject/result/krasowski/Fpocket/LDA/selectedDesc/BestModels25/", name_dataset="ApoForm138", name_file_result = "POE3_25")
# multiModelTest ("/home/borrel/druggabilityProject/result/krasowski/Fpocket/LDA/selectedDesc4/BestModels25/", name_dataset="ApoForm138", name_file_result = "POE4_25")
# multiModelTest ("/home/borrel/druggabilityProject/result/krasowski/Fpocket/LDA/selectedDesc/BestModels25/", name_dataset="ApoFormClean", name_file_result = "POE3_25")
# multiModelTest ("/home/borrel/druggabilityProject/result/krasowski/Fpocket/LDA/selectedDesc4/BestModels25/", name_dataset="ApoFormClean", name_file_result = "POE4_25")
# -PLE- #
# multiModelTest ("/home/borrel/druggabilityProject/result/krasowski/proximity/LDA/selectedDesc/BestModels25/", name_dataset="ApoForm138", name_file_result = "PLE_25")
# multiModelTest ("/home/borrel/druggabilityProject/result/krasowski/proximity/LDA/selectedDesc/BestModels25/", name_dataset="ApoFormClean", name_file_result = "PLE_25")
|
[
"leslie.regad@gmail.com"
] |
leslie.regad@gmail.com
|
d03fdd7163443682518f951771af10526d09f73e
|
52bf896a20be0cc22c0e81327f92eb99e7fa59d1
|
/tests/test_parsing.py
|
723dd34aab106cc9a5dda37b1e6fc32b91ca5e5a
|
[
"MIT"
] |
permissive
|
regebro/svg.path
|
f13b73eb0a6a416f725380b46f9bd89cc353de3b
|
5548748f41db4510212c92187d5aa6c2c324b660
|
refs/heads/master
| 2023-08-30T11:15:07.279639
| 2023-04-29T16:42:28
| 2023-04-29T16:42:28
| 7,987,589
| 196
| 54
|
MIT
| 2023-05-01T19:02:35
| 2013-02-03T07:13:19
|
Python
|
UTF-8
|
Python
| false
| false
| 23,271
|
py
|
import unittest
from svg.path.path import CubicBezier, QuadraticBezier, Line, Arc, Path, Move, Close
from svg.path.parser import parse_path
class TestParser(unittest.TestCase):
maxDiff = None
def test_svg_examples(self):
"""Examples from the SVG spec"""
path1 = parse_path("M 100 100 L 300 100 L 200 300 z")
self.assertEqual(
path1,
Path(
Move(100 + 100j),
Line(100 + 100j, 300 + 100j),
Line(300 + 100j, 200 + 300j),
Close(200 + 300j, 100 + 100j),
),
)
# for Z command behavior when there is multiple subpaths
path1 = parse_path("M 0 0 L 50 20 M 100 100 L 300 100 L 200 300 z")
self.assertEqual(
path1,
Path(
Move(0j),
Line(0 + 0j, 50 + 20j),
Move(100 + 100j),
Line(100 + 100j, 300 + 100j),
Line(300 + 100j, 200 + 300j),
Close(200 + 300j, 100 + 100j),
),
)
path1 = parse_path("M 100 100 L 200 200")
path2 = parse_path("M100 100L200 200")
self.assertEqual(path1, path2)
path1 = parse_path("M 100 200 L 200 100 L -100 -200")
path2 = parse_path("M 100 200 L 200 100 -100 -200")
self.assertEqual(path1, path2)
path1 = parse_path(
"""M100,200 C100,100 250,100 250,200
S400,300 400,200"""
)
self.assertEqual(
path1,
Path(
Move(100 + 200j),
CubicBezier(100 + 200j, 100 + 100j, 250 + 100j, 250 + 200j),
CubicBezier(250 + 200j, 250 + 300j, 400 + 300j, 400 + 200j),
),
)
path1 = parse_path("M100,200 C100,100 400,100 400,200")
self.assertEqual(
path1,
Path(
Move(100 + 200j),
CubicBezier(100 + 200j, 100 + 100j, 400 + 100j, 400 + 200j),
),
)
path1 = parse_path("M100,500 C25,400 475,400 400,500")
self.assertEqual(
path1,
Path(
Move(100 + 500j),
CubicBezier(100 + 500j, 25 + 400j, 475 + 400j, 400 + 500j),
),
)
path1 = parse_path("M100,800 C175,700 325,700 400,800")
self.assertEqual(
path1,
Path(
Move(100 + 800j),
CubicBezier(100 + 800j, 175 + 700j, 325 + 700j, 400 + 800j),
),
)
path1 = parse_path("M600,200 C675,100 975,100 900,200")
self.assertEqual(
path1,
Path(
Move(600 + 200j),
CubicBezier(600 + 200j, 675 + 100j, 975 + 100j, 900 + 200j),
),
)
path1 = parse_path("M600,500 C600,350 900,650 900,500")
self.assertEqual(
path1,
Path(
Move(600 + 500j),
CubicBezier(600 + 500j, 600 + 350j, 900 + 650j, 900 + 500j),
),
)
path1 = parse_path(
"""M600,800 C625,700 725,700 750,800
S875,900 900,800"""
)
self.assertEqual(
path1,
Path(
Move(600 + 800j),
CubicBezier(600 + 800j, 625 + 700j, 725 + 700j, 750 + 800j),
CubicBezier(750 + 800j, 775 + 900j, 875 + 900j, 900 + 800j),
),
)
path1 = parse_path("M200,300 Q400,50 600,300 T1000,300")
self.assertEqual(
path1,
Path(
Move(200 + 300j),
QuadraticBezier(200 + 300j, 400 + 50j, 600 + 300j),
QuadraticBezier(600 + 300j, 800 + 550j, 1000 + 300j),
),
)
path1 = parse_path("M300,200 h-150 a150,150 0 1,0 150,-150 z")
self.assertEqual(
path1,
Path(
Move(300 + 200j),
Line(300 + 200j, 150 + 200j),
Arc(150 + 200j, 150 + 150j, 0, 1, 0, 300 + 50j),
Close(300 + 50j, 300 + 200j),
),
)
path1 = parse_path("M275,175 v-150 a150,150 0 0,0 -150,150 z")
self.assertEqual(
path1,
Path(
Move(275 + 175j),
Line(275 + 175j, 275 + 25j),
Arc(275 + 25j, 150 + 150j, 0, 0, 0, 125 + 175j),
Close(125 + 175j, 275 + 175j),
),
)
path1 = parse_path("M275,175 v-150 a150,150 0 0,0 -150,150 L 275,175 z")
self.assertEqual(
path1,
Path(
Move(275 + 175j),
Line(275 + 175j, 275 + 25j),
Arc(275 + 25j, 150 + 150j, 0, 0, 0, 125 + 175j),
Line(125 + 175j, 275 + 175j),
Close(275 + 175j, 275 + 175j),
),
)
path1 = parse_path(
"""M600,350 l 50,-25
a25,25 -30 0,1 50,-25 l 50,-25
a25,50 -30 0,1 50,-25 l 50,-25
a25,75 -30 0,1 50,-25 l 50,-25
a25,100 -30 0,1 50,-25 l 50,-25"""
)
self.assertEqual(
path1,
Path(
Move(600 + 350j),
Line(600 + 350j, 650 + 325j),
Arc(650 + 325j, 25 + 25j, -30, 0, 1, 700 + 300j),
Line(700 + 300j, 750 + 275j),
Arc(750 + 275j, 25 + 50j, -30, 0, 1, 800 + 250j),
Line(800 + 250j, 850 + 225j),
Arc(850 + 225j, 25 + 75j, -30, 0, 1, 900 + 200j),
Line(900 + 200j, 950 + 175j),
Arc(950 + 175j, 25 + 100j, -30, 0, 1, 1000 + 150j),
Line(1000 + 150j, 1050 + 125j),
),
)
def test_wc3_examples12(self):
"""
W3C_SVG_11_TestSuite Paths
Test using multiple coord sets to build a polybeizer, and implicit values for initial S.
"""
path12 = parse_path(
"M 100 100 C 100 20 200 20 200 100 S 300 180 300 100"
)
self.assertEqual(
path12,
Path(
Move(to=(100 + 100j)),
CubicBezier(
start=(100 + 100j),
control1=(100 + 20j),
control2=(200 + 20j),
end=(200 + 100j),
),
CubicBezier(
start=(200 + 100j),
control1=(200 + 180j),
control2=(300 + 180j),
end=(300 + 100j),
),
),
)
path12 = parse_path("M 100 250 S 200 200 200 250 300 300 300 250")
self.assertEqual(
path12,
Path(
Move(to=(100 + 250j)),
CubicBezier(
start=(100 + 250j),
control1=(100 + 250j),
control2=(200 + 200j),
end=(200 + 250j),
),
CubicBezier(
start=(200 + 250j),
control1=(200 + 300j),
control2=(300 + 300j),
end=(300 + 250j),
),
),
)
def test_wc3_examples13(self):
"""
W3C_SVG_11_TestSuite Paths
Test multiple coordinates for V and H.
"""
#
path13 = parse_path(
" M 240.00000 56.00000 H 270.00000 300.00000 320.00000 400.00000 "
)
self.assertEqual(
path13,
Path(
Move(to=(240 + 56j)),
Line(start=(240 + 56j), end=(270 + 56j)),
Line(start=(270 + 56j), end=(300 + 56j)),
Line(start=(300 + 56j), end=(320 + 56j)),
Line(start=(320 + 56j), end=(400 + 56j)),
),
)
path13 = parse_path(
" M 240.00000 156.00000 V 180.00000 200.00000 260.00000 300.00000 "
)
self.assertEqual(
path13,
Path(
Move(to=(240 + 156j)),
Line(start=(240 + 156j), end=(240 + 180j)),
Line(start=(240 + 180j), end=(240 + 200j)),
Line(start=(240 + 200j), end=(240 + 260j)),
Line(start=(240 + 260j), end=(240 + 300j)),
),
)
def test_wc3_examples14(self):
"""
W3C_SVG_11_TestSuite Paths
Test implicit values for moveto. If the first command is 'm' it should be taken as an absolute moveto,
plus implicit lineto.
"""
path14 = parse_path(
" m 62.00000 56.00000 51.96152 90.00000 -103.92304 0.00000 51.96152 "
"-90.00000 z m 0.00000 15.00000 38.97114 67.50000 -77.91228 0.00000 "
"38.97114 -67.50000 z "
)
self.assertEqual(
path14,
Path(
Move(to=(62 + 56j)),
Line(start=(62 + 56j), end=(113.96152000000001 + 146j)),
Line(
start=(113.96152000000001 + 146j), end=(10.038480000000007 + 146j)
),
Line(start=(10.038480000000007 + 146j), end=(62.00000000000001 + 56j)),
Close(start=(62.00000000000001 + 56j), end=(62 + 56j)),
Move(to=(62 + 71j)),
Line(start=(62 + 71j), end=(100.97113999999999 + 138.5j)),
Line(
start=(100.97113999999999 + 138.5j),
end=(23.058859999999996 + 138.5j),
),
Line(
start=(23.058859999999996 + 138.5j), end=(62.029999999999994 + 71j)
),
Close(start=(62.029999999999994 + 71j), end=(62 + 71j)),
),
)
path14 = parse_path(
"M 177.00000 56.00000 228.96152 146.00000 125.03848 146.00000 177.00000 "
"56.00000 Z M 177.00000 71.00000 215.97114 138.50000 138.02886 138.50000 "
"177.00000 71.00000 Z "
)
self.assertEqual(
path14,
Path(
Move(to=(177 + 56j)),
Line(start=(177 + 56j), end=(228.96152 + 146j)),
Line(start=(228.96152 + 146j), end=(125.03848 + 146j)),
Line(start=(125.03848 + 146j), end=(177 + 56j)),
Close(start=(177 + 56j), end=(177 + 56j)),
Move(to=(177 + 71j)),
Line(start=(177 + 71j), end=(215.97114 + 138.5j)),
Line(start=(215.97114 + 138.5j), end=(138.02886 + 138.5j)),
Line(start=(138.02886 + 138.5j), end=(177 + 71j)),
Close(start=(177 + 71j), end=(177 + 71j)),
),
)
def test_wc3_examples15(self):
"""
W3C_SVG_11_TestSuite Paths
'M' or 'm' command with more than one pair of coordinates are absolute
if the moveto was specified with 'M' and relative if the moveto was
specified with 'm'.
"""
path15 = parse_path("M100,120 L160,220 L40,220 z")
self.assertEqual(
path15,
Path(
Move(to=(100 + 120j)),
Line(start=(100 + 120j), end=(160 + 220j)),
Line(start=(160 + 220j), end=(40 + 220j)),
Close(start=(40 + 220j), end=(100 + 120j)),
),
)
path15 = parse_path("M350,120 L410,220 L290,220 z")
self.assertEqual(
path15,
Path(
Move(to=(350 + 120j)),
Line(start=(350 + 120j), end=(410 + 220j)),
Line(start=(410 + 220j), end=(290 + 220j)),
Close(start=(290 + 220j), end=(350 + 120j)),
),
)
path15 = parse_path("M100,120 160,220 40,220 z")
self.assertEqual(
path15,
Path(
Move(to=(100 + 120j)),
Line(start=(100 + 120j), end=(160 + 220j)),
Line(start=(160 + 220j), end=(40 + 220j)),
Close(start=(40 + 220j), end=(100 + 120j)),
),
)
path15 = parse_path("m350,120 60,100 -120,0 z")
self.assertEqual(
path15,
Path(
Move(to=(350 + 120j)),
Line(start=(350 + 120j), end=(410 + 220j)),
Line(start=(410 + 220j), end=(290 + 220j)),
Close(start=(290 + 220j), end=(350 + 120j)),
),
)
def test_wc3_examples17(self):
"""
W3C_SVG_11_TestSuite Paths
Test that the 'z' and 'Z' command have the same effect.
"""
path17a = parse_path("M 50 50 L 50 150 L 150 150 L 150 50 z")
path17b = parse_path("M 50 50 L 50 150 L 150 150 L 150 50 Z")
self.assertEqual(path17a, path17b)
path17a = parse_path("M 250 50 L 250 150 L 350 150 L 350 50 Z")
path17b = parse_path("M 250 50 L 250 150 L 350 150 L 350 50 z")
self.assertEqual(path17a, path17b)
def test_wc3_examples18(self):
"""
W3C_SVG_11_TestSuite Paths
The 'path' element's 'd' attribute ignores additional whitespace, newline characters, and commas,
and BNF processing consumes as much content as possible, stopping as soon as a character that doesn't
satisfy the production is encountered.
"""
path18a = parse_path("M 20 40 H 40")
path18b = parse_path(
"""M 20 40
H 40"""
)
self.assertEqual(path18a, path18b)
path18a = parse_path("M 20 60 H 40")
path18b = parse_path(
"""
M
20
60
H
40
"""
)
self.assertEqual(path18a, path18b)
path18a = parse_path("M 20 80 H40")
path18b = parse_path("M 20,80 H 40")
self.assertEqual(path18a, path18b)
path18a = parse_path("M 20 100 H 40#90")
path18b = parse_path("M 20 100 H 40")
self.assertEqual(path18a, path18b)
path18a = parse_path("M 20 120 H 40.5 0.6")
path18b = parse_path("M 20 120 H 40.5.6")
self.assertEqual(path18a, path18b)
path18a = parse_path("M 20 140 h 10 -20")
path18b = parse_path("M 20 140 h 10-20")
self.assertEqual(path18a, path18b)
path18a = parse_path("M 20 160 H 40")
path18b = parse_path("M 20 160 H 40#90")
self.assertEqual(path18a, path18b)
def test_wc3_examples19(self):
"""
W3C_SVG_11_TestSuite Paths
Test that additional parameters to pathdata commands are treated as additional calls to the most recent command.
"""
path19a = parse_path("M20 20 H40 H60")
path19b = parse_path("M20 20 H40 60")
self.assertEqual(path19a, path19b)
path19a = parse_path("M20 40 h20 h20")
path19b = parse_path("M20 40 h20 20")
self.assertEqual(path19a, path19b)
path19a = parse_path("M120 20 V40 V60")
path19b = parse_path("M120 20 V40 60")
self.assertEqual(path19a, path19b)
path19a = parse_path("M140 20 v20 v20")
path19b = parse_path("M140 20 v20 20")
self.assertEqual(path19a, path19b)
path19a = parse_path("M220 20 L 240 20 L260 20")
path19b = parse_path("M220 20 L 240 20 260 20 ")
self.assertEqual(path19a, path19b)
path19a = parse_path("M220 40 l 20 0 l 20 0")
path19b = parse_path("M220 40 l 20 0 20 0")
self.assertEqual(path19a, path19b)
path19a = parse_path("M50 150 C50 50 200 50 200 150 C200 50 350 50 350 150")
path19b = parse_path("M50 150 C50 50 200 50 200 150 200 50 350 50 350 150")
self.assertEqual(path19a, path19b)
path19a = parse_path("M50, 200 c0,-100 150,-100 150,0 c0,-100 150,-100 150,0")
path19b = parse_path("M50, 200 c0,-100 150,-100 150,0 0,-100 150,-100 150,0")
self.assertEqual(path19a, path19b)
path19a = parse_path("M50 250 S125 200 200 250 S275, 200 350 250")
path19b = parse_path("M50 250 S125 200 200 250 275, 200 350 250")
self.assertEqual(path19a, path19b)
path19a = parse_path("M50 275 s75 -50 150 0 s75, -50 150 0")
path19b = parse_path("M50 275 s75 -50 150 0 75, -50 150 0")
self.assertEqual(path19a, path19b)
path19a = parse_path("M50 300 Q 125 275 200 300 Q 275 325 350 300")
path19b = parse_path("M50 300 Q 125 275 200 300 275 325 350 300")
self.assertEqual(path19a, path19b)
path19a = parse_path("M50 325 q 75 -25 150 0 q 75 25 150 0")
path19b = parse_path("M50 325 q 75 -25 150 0 75 25 150 0")
self.assertEqual(path19a, path19b)
path19a = parse_path("M425 25 T 425 75 T 425 125")
path19b = parse_path("M425 25 T 425 75 425 125")
self.assertEqual(path19a, path19b)
path19a = parse_path("M450 25 t 0 50 t 0 50")
path19b = parse_path("M450 25 t 0 50 0 50")
self.assertEqual(path19a, path19b)
path19a = parse_path("M400,200 A25 25 0 0 0 425 150 A25 25 0 0 0 400 200")
path19b = parse_path("M400,200 A25 25 0 0 0 425 150 25 25 0 0 0 400 200")
self.assertEqual(path19a, path19b)
path19a = parse_path("M400,300 a25 25 0 0 0 25 -50 a25 25 0 0 0 -25 50")
path19b = parse_path("M400,300 a25 25 0 0 0 25 -50 25 25 0 0 0 -25 50")
self.assertEqual(path19a, path19b)
def test_wc3_examples20(self):
"""
W3C_SVG_11_TestSuite Paths
Tests parsing of the elliptical arc path syntax.
"""
path20a = parse_path("M120,120 h25 a25,25 0 1,0 -25,25 z")
path20b = parse_path("M120,120 h25 a25,25 0 10 -25,25z")
self.assertEqual(path20a, path20b)
path20a = parse_path("M200,120 h-25 a25,25 0 1,1 25,25 z")
path20b = parse_path("M200,120 h-25 a25,25 0 1125,25 z")
self.assertEqual(path20a, path20b)
path20a = parse_path("M280,120 h25 a25,25 0 1,0 -25,25 z")
self.assertRaises(Exception, 'parse_path("M280,120 h25 a25,25 0 6 0 -25,25 z")')
path20a = parse_path("M360,120 h-25 a25,25 0 1,1 25,25 z")
        self.assertRaises(
            Exception, parse_path, "M360,120 h-25 a25,25 0 1 -1 25,25 z"
        )
path20a = parse_path("M120,200 h25 a25,25 0 1,1 -25,-25 z")
path20b = parse_path("M120,200 h25 a25,25 0 1 1-25,-25 z")
self.assertEqual(path20a, path20b)
path20a = parse_path("M200,200 h-25 a25,25 0 1,0 25,-25 z")
self.assertRaises(Exception, 'parse_path("M200,200 h-25 a25,2501 025,-25 z")')
path20a = parse_path("M280,200 h25 a25,25 0 1,1 -25,-25 z")
        self.assertRaises(
            Exception, parse_path, "M280,200 h25 a25 25 0 1 7 -25 -25 z"
        )
path20a = parse_path("M360,200 h-25 a25,25 0 1,0 25,-25 z")
        self.assertRaises(
            Exception, parse_path, "M360,200 h-25 a25,25 0 -1 0 25,-25 z"
        )
def test_others(self):
# Other paths that need testing:
# Relative moveto:
path1 = parse_path("M 0 0 L 50 20 m 50 80 L 300 100 L 200 300 z")
self.assertEqual(
path1,
Path(
Move(0j),
Line(0 + 0j, 50 + 20j),
Move(100 + 100j),
Line(100 + 100j, 300 + 100j),
Line(300 + 100j, 200 + 300j),
Close(200 + 300j, 100 + 100j),
),
)
# Initial smooth and relative CubicBezier
path1 = parse_path("M100,200 s 150,-100 150,0")
self.assertEqual(
path1,
Path(
Move(100 + 200j),
CubicBezier(100 + 200j, 100 + 200j, 250 + 100j, 250 + 200j),
),
)
# Initial smooth and relative QuadraticBezier
path1 = parse_path("M100,200 t 150,0")
self.assertEqual(
path1,
Path(Move(100 + 200j), QuadraticBezier(100 + 200j, 100 + 200j, 250 + 200j)),
)
# Relative QuadraticBezier
path1 = parse_path("M100,200 q 0,0 150,0")
self.assertEqual(
path1,
Path(Move(100 + 200j), QuadraticBezier(100 + 200j, 100 + 200j, 250 + 200j)),
)
def test_negative(self):
"""You don't need spaces before a minus-sign"""
path1 = parse_path("M100,200c10-5,20-10,30-20")
path2 = parse_path("M 100 200 c 10 -5 20 -10 30 -20")
self.assertEqual(path1, path2)
def test_numbers(self):
"""Exponents and other number format cases"""
# It can be e or E, the plus is optional, and a minimum of +/-3.4e38 must be supported.
path1 = parse_path("M-3.4e38 3.4E+38L-3.4E-38,3.4e-38")
path2 = Path(
Move(-3.4e38 + 3.4e38j), Line(-3.4e38 + 3.4e38j, -3.4e-38 + 3.4e-38j)
)
self.assertEqual(path1, path2)
def test_errors(self):
self.assertRaises(ValueError, parse_path, "M 100 100 L 200 200 Z 100 200")
def test_non_path(self):
# It's possible in SVG to create paths that has zero length,
# we need to handle that.
path = parse_path("M10.236,100.184")
self.assertEqual(path.d(), "M 10.236,100.184")
def test_issue_45(self):
# A missing Z in certain cases
path = parse_path(
"m 1672.2372,-54.8161 "
"a 14.5445,14.5445 0 0 0 -11.3152,23.6652 "
"l 27.2573,27.2572 27.2572,-27.2572 "
"a 14.5445,14.5445 0 0 0 -11.3012,-23.634 "
"a 14.5445,14.5445 0 0 0 -11.414,5.4625 "
"l -4.542,4.5420 "
"l -4.5437,-4.5420 "
"a 14.5445,14.5445 0 0 0 -11.3984,-5.4937 "
"z"
)
self.assertEqual(
"m 1672.24,-54.8161 "
"a 14.5445,14.5445 0 0,0 -11.3152,23.6652 "
"l 27.2573,27.2572 l 27.2572,-27.2572 "
"a 14.5445,14.5445 0 0,0 -11.3012,-23.634 "
"a 14.5445,14.5445 0 0,0 -11.414,5.4625 "
"l -4.542,4.542 "
"l -4.5437,-4.542 "
"a 14.5445,14.5445 0 0,0 -11.3984,-5.4937 "
"z",
path.d(),
)
def test_arc_flag(self):
"""Issue #69"""
path = parse_path(
"M 5 1 v 7.344 A 3.574 3.574 0 003.5 8 3.515 3.515 0 000 11.5 C 0 13.421 1.579 15 3.5 15 "
"A 3.517 3.517 0 007 11.531 v -7.53 h 6 v 4.343 A 3.574 3.574 0 0011.5 8 3.515 3.515 0 008 11.5 "
"c 0 1.921 1.579 3.5 3.5 3.5 1.9 0 3.465 -1.546 3.5 -3.437 V 1 z"
)
# Check that all elemets is there:
self.assertEqual(len(path), 15)
# It ends on a vertical line to Y 1:
self.assertEqual(path[-1].end.imag, 1)
def test_incomplete_numbers(self):
path = parse_path("M 0. .1")
self.assertEqual(path.d(), "M 0,0.1")
path = parse_path("M 0..1")
self.assertEqual(path.d(), "M 0,0.1")
|
[
"noreply@github.com"
] |
noreply@github.com
|
d353d5fff31bf52069940390a252ad8145de17fc
|
5ab30fd1b3fa74a8763abf8e948924f1c3dfceff
|
/cari-pakar web/manage.py
|
03ee59ef477f824b640f30bc261de94ecf176270
|
[
"MIT"
] |
permissive
|
eightfold28/Expert-Finding
|
cf342f0c922f26995854fd363cc6d1fcc0288e0a
|
09ebbad9162eb4c3481eb98d5f6a47e37820be6f
|
refs/heads/master
| 2022-12-09T13:21:20.479543
| 2018-11-20T12:58:32
| 2018-11-20T12:58:32
| 155,075,215
| 0
| 0
|
MIT
| 2022-12-08T01:02:09
| 2018-10-28T13:40:35
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "caripakar.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"13514028@std.stei.itb.ac.id"
] |
13514028@std.stei.itb.ac.id
|
8dbad1d4354d63d045a7b9f71ef8405a05615120
|
e16cc78f0e05e50d589558535ae0fc5e414dd4a0
|
/IM5.4.0_timing/ztest_e_send5_video.py
|
1c13ecf0caf88ccbce3128b36482d9396bea79b6
|
[] |
no_license
|
wenqiang1990/wenqiang_code
|
df825b089e3bd3c55bcff98f4946f235f50f2f3d
|
3c9d77e0a11af081c60a5b1f4c72ecd159945864
|
refs/heads/master
| 2020-06-19T04:38:39.052037
| 2019-12-18T03:40:39
| 2019-12-18T03:40:39
| 196,561,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,642
|
py
|
#coding:utf-8
import time
import datetime
import unittest
from appium.webdriver.common.touch_action import TouchAction
from robot.utils.asserts import *
from appium import webdriver
from public import login
from public import logout
from clear_massage import clear_massage
from set_driver import set_driver
class Imtest(unittest.TestCase):
def setUp(self):
wq=set_driver()
self.driver=wq.get_driver()
self.verificationErrors = []
self.driver.implicitly_wait(10)
    def test_send_video(self):
        '''A group member sends a short-video message'''
        clear_massage(self,name="groupname1")
        clear_massage(self,name=u"系统通知")  # "System notifications"
        driver = self.driver
        with open(r'F:\Appium\group\groupID.txt','r') as f:
            el=f.read()
        driver.find_element_by_id("com.yuntongxun.eckuailiao:id/btn_address_list").click()  # tap Contacts
        driver.find_element_by_id("com.yuntongxun.eckuailiao:id/tv_head_group").click()  # tap Groups
        driver.find_element_by_id("com.yuntongxun.eckuailiao:id/p_list").click()  # tap the group list
        el=u"群组id:"+el  # UI label "group id:" plus the id read from file
        driver.find_element_by_name(el).click()  # tap the group id; later, read the id of the group created by the previous test case instead
        # the group member sends a short video
        self.driver.find_element_by_id("chatting_attach_btn").click()  # tap the plus button
        self.driver.find_element_by_name(u"短视频").click()  # "Short video"
        time.sleep(2)
        action1 = TouchAction(self.driver)
        el = self.driver.find_element_by_id("com.yuntongxun.eckuailiao:id/start")
        action1.long_press(el,duration=10000).perform()
        self.driver.find_element_by_id("com.yuntongxun.eckuailiao:id/ok").click()  # tap Send
        time.sleep(5)
        el=self.driver.find_element_by_id("tv_read_unread").get_attribute("text")
        assert_equal(el, u"已读", msg=u"read-status verification failed")  # expects "Read"
        print(el + u" read status verified successfully")
        el = self.driver.find_element_by_id("tv_read_unread")  # the status element
        action1 = TouchAction(self.driver)
        action1.long_press(el,duration=5000).perform()
        self.driver.find_element_by_name(u"删除").click()  # "Delete"
        self.driver.find_element_by_id("dilaog_button3").click()  # confirm deletion
        time.sleep(2)
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    # build the test suite
suite = unittest.TestSuite()
suite.addTest(Imtest("test_send_video"))
    # run the tests
runner = unittest.TextTestRunner()
runner.run(suite)
|
[
"1058099258@qq.com"
] |
1058099258@qq.com
|
b0b39a1067ec0844dbfe72eea92692cd8d97b530
|
02719c273e8b8903ae5a1980588e0708214c795d
|
/setup.py
|
8b9e15861fdfd69a026f87cb46fa3aa4ceb3ab59
|
[
"MIT"
] |
permissive
|
sushmitakullu/setupenzyme
|
d5fcd840a1d5770e63e26b540b7a0cd05dde6c63
|
1c6a507c9fbce9cc96a7d1683a39d5f42a17fe9c
|
refs/heads/main
| 2023-01-02T23:35:28.230364
| 2020-11-01T18:19:12
| 2020-11-01T18:19:12
| 309,159,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
from setuptools import setup, find_packages
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'Operating System :: Microsoft :: Windows :: Windows 10',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
]
setup(
name='joshbasiccalculator',
version='0.0.1',
description='A very basic calculator',
long_description=open('README.txt').read() + '\n\n' + open('CHANGELOG.txt').read(),
url='',
author='Joshua Lowe',
author_email='josh@edublocks.org',
license='MIT',
classifiers=classifiers,
keywords='calculator',
packages=find_packages(),
    install_requires=[]
)
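# A minimal sketch of building and installing this package locally with the
# standard setuptools/pip workflow (paths are illustrative, and README.txt and
# CHANGELOG.txt are assumed to exist next to this file):
#   python setup.py sdist
#   pip install dist/joshbasiccalculator-0.0.1.tar.gz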
|
[
"noreply@github.com"
] |
noreply@github.com
|
d9e8087dbb23e198465e9cec104a3c58229c202a
|
db87935ba376500c6aa294980c5939f9cb90c9af
|
/python.py
|
9c91b80b08808b1da0b8e0bd1e06bdfd492e1b88
|
[] |
no_license
|
oldssc/Python_Code
|
e65c488b4a6ec3c19c44a358d865cb67f686d621
|
893b13b75bb2e214d9a2d8ab4ff478d70bd2812e
|
refs/heads/master
| 2021-01-23T16:12:33.859037
| 2017-06-04T03:30:15
| 2017-06-04T03:30:15
| 93,286,350
| 0
| 0
| null | 2017-06-04T03:27:09
| 2017-06-04T02:53:33
| null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
people={
'Alan':{
'location':'China',
'height':"5'11",
'favorite_color':'red',
},
'Johnny':{
'location':'Barrington',
'height':"5'8",
'favorite_color':'blue'
},
'Mike':{
'location':'Barrington',
'height':"6'1",
'favorite_color':'yellow'
}
}
for name, value in people.items():
    print(name + "'s info is:\n")
    print("Location is: " + value['location'])
    print("Height is: " + value['height'])
    print("Favorite color is: " + value['favorite_color'])
|
[
"noreply@github.com"
] |
noreply@github.com
|
38a36ff84c8aa72ffacb7058e7044aa7d10f74d7
|
b0f4ad442ea7530c8475baadad0fadc096e60b5c
|
/laskarit/viikko2/maksukortti/tasks.py
|
5a249200e85cc96f146e2c2de58931eb4d0ce8ba
|
[] |
no_license
|
henkkah/ot-harjoitustyo
|
42448c6e085665570eeb51a4afee05a7dcc8ca01
|
f94eb085043f4bb72cfc2b6b6c5c716f527bb8fd
|
refs/heads/master
| 2023-04-20T12:29:34.812996
| 2021-05-14T12:00:30
| 2021-05-14T12:00:30
| 349,364,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
from invoke import task
@task
def start(ctx):
ctx.run("python src/index.py")
@task
def test(ctx):
ctx.run("pytest src")
@task
def coverage(ctx):
ctx.run("coverage run --branch -m pytest src")
@task(coverage)
def coverage_report(ctx):
ctx.run("coverage html")
|
[
"henrik.harjula@outlook.com"
] |
henrik.harjula@outlook.com
|
e27002f755337a917bd5ab17abc80d304144de8c
|
7b9f66b2ea1079dd9cee5b82b8ae5897e9c2f45f
|
/Medium/915-Partition_array_into_disjoint_intervals.py
|
2e0c41d711dd9ab614f851c41611c8d0eefff3a0
|
[] |
no_license
|
carminelaluna/Leetcode-Solutions
|
4b2c797ae180f9095631a47c60264e2876a38acd
|
807e04d881874bc60e08443a57d8aea2c9e5b5aa
|
refs/heads/main
| 2023-06-24T18:26:25.246763
| 2021-07-25T19:14:06
| 2021-07-25T19:14:06
| 388,899,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
class Solution:
    def partitionDisjoint(self, nums: List[int]) -> int:
        max_left = nums[0]  # max of the left partition
        max_seen = nums[0]  # max of all elements scanned so far
        res = 0
        for i in range(1, len(nums)):
            if nums[i] > max_seen:
                max_seen = nums[i]
            if nums[i] < max_left:
                # nums[i] must belong to the left partition, so extend it;
                # its max becomes the max of everything seen so far
                res = i
                max_left = max_seen
        return res + 1
|
[
"carmine.laluna@gmail.com"
] |
carmine.laluna@gmail.com
|
378aa46e73e900bb233585d55176e247b746501d
|
446f2a774ab17cb434ac5695ef26ee239e5a81c1
|
/ProjectEuler/Problems 1 - 50/P049.py
|
9e13e7d4b297ab1a889698a3c0f61bd04d7bf774
|
[] |
no_license
|
ArturMroz/Project-Euler
|
2453b195d6ff057853dcaf418b551ffc9a8aa070
|
8eaa693290d953ab587a6818df06d7b5f7d6fe51
|
refs/heads/master
| 2021-01-10T22:03:23.280106
| 2019-02-17T13:30:24
| 2019-02-17T13:30:24
| 16,026,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
# https://projecteuler.net/problem=49
# Concatenate the three terms in sequence where
# (i) each of the three terms are prime, and,
# (ii) each of the 4-digit numbers are permutations of one another.
def eratosthenes(lim):
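    # Sieve of Eratosthenes: yield every prime below lim.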
a = [True] * lim
a[0] = a[1] = False
for i, v in enumerate(a):
if v:
yield i
for n in range(i * i, lim, i):
a[n] = False
def solve():
primes = set(eratosthenes(9999))
for p in sorted(primes):
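        # skip primes up to 1487 so we find the sequence other than the
        # example 1487, 4817, 8147 given in the problem statement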
if p < 1488:
continue
for step in range(1, 3333):
p2, p3 = p + step, p + step * 2
if p2 in primes and p3 in primes:
s1, s2, s3 = str(p), str(p2), str(p3)
if sorted(s1) == sorted(s2) == sorted(s3):
return s1 + s2 + s3
print(solve())
|
[
"frost47@gmail.com"
] |
frost47@gmail.com
|
d65afb1f6543a2a444321d7e56c5b38b5e534b50
|
d83b1e44fdb5969001e7ca31b15658084cfc6a00
|
/poker/models.py
|
f8cd80d9ec1e2428d828e7eb509a1bcdc233b7e0
|
[] |
no_license
|
chrhyman/wugs-site
|
c7134d6025cb68e6b81a04088a5cb22eef551e2e
|
f73799fbecd2d7b55ed8e4362c868569a32f7606
|
refs/heads/master
| 2021-01-20T18:45:12.133731
| 2016-07-13T05:31:35
| 2016-07-13T05:31:35
| 62,846,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
from app import db
class PokerGame(db.Model):
__tablename__ = "pokergame"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64))
startmoney = db.Column(db.Integer)
endmoney = db.Column(db.Integer)
handsplayed = db.Column(db.Integer)
|
[
"chrhyman@gmail.com"
] |
chrhyman@gmail.com
|
20cc51e3bc9d8f64ab428a9a2f9270a264ffdf30
|
de4ec418ec12cfcd35bbe59fe656268873240381
|
/video/utils.py
|
1b4b0c361cda0b1f8cea019d8f26a5885060f89f
|
[] |
no_license
|
ffabulous/multimodal
|
9069d70fc571dd5234ef22772e39842eb17a41ae
|
14352c9609d7f5391c0eca9f45c8bb116acebe56
|
refs/heads/master
| 2022-09-30T23:52:40.489301
| 2020-06-05T10:04:41
| 2020-06-05T10:04:41
| 269,588,120
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,157
|
py
|
from __future__ import print_function
from collections import Counter, defaultdict, deque
import datetime
import time
import torch
import torch.distributed as dist
import errno
import os
from tensorboardX import SummaryWriter
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def reset(self):
self.deque.clear()
self.total = 0.0
self.count = 0
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
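        # pack count/total into one tensor and sum it across all ranks so
        # every process ends up with the same global statistics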
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self): # NOTE: moving avg
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self): # NOTE: last one
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
)
class MetricLogger(object):
def __init__(self, log_dir, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
# tensorboard
self.tx = None
self.is_tx = log_dir is not None
if self.is_tx:
self.tx = SummaryWriter(log_dir)
self.step = 1
self.label_cnt = defaultdict(Counter)
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def update_counter(self, cnt):
for k, c in cnt.items():
self.label_cnt[k] += c
def label_accuracy(self):
for k, nom in self.label_cnt["total"].items():
den = self.label_cnt["1"][k]
acc = den / float(nom) * 100
print("{}: {:.1f} ({} / {})".format(k, acc, den, nom))
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
if name.endswith("valid"):
continue
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_tx(self, is_train):
if is_train:
self.tx.add_scalar("lr", self.meters["lr"].value, self.step)
self.tx.add_scalars("loss", {"train": self.meters["loss/train"].value}, self.step)
self.tx.add_scalars("acc", {"train": self.meters["acc/train"].value}, self.step)
        elif self.is_tx:  # NOTE: at the end of every epoch
self.tx.add_scalars("loss", {"valid": self.meters["loss/valid"].global_avg}, self.step)
self.tx.add_scalars("acc", {"valid": self.meters["acc/valid"].global_avg}, self.step)
def log_every(self, iterable, log_freq, header, is_train=True):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
            # NOTE: everything below runs after the step has completed
iter_time.update(time.time() - end)
if log_freq > 0 and i % log_freq == 0:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)
)
)
if self.is_tx:
self.log_tx(is_train)
i += 1
if is_train:
self.step += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("{} Total time: {}".format(header, total_time_str))
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
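        # correct[k][b] is True when the k-th ranked prediction for sample b matches the target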
correct = pred.eq(target[None])
res = []
cnt = {"total": Counter(target.tolist())}
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
cnt[str(k)] = Counter(pred[:k][correct[:k]].tolist())
res.append(correct_k * (100.0 / batch_size))
return res, cnt
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
elif hasattr(args, "rank"):
pass
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
)
setup_for_distributed(args.rank == 0)
|
[
"keunchan.park@navercorp.com"
] |
keunchan.park@navercorp.com
|
e9d1e0e03a5772e4e6ac7caabdd881d31e059704
|
ed29bdb7bcfc52c1a5d66e70c6b46a2db3beedc1
|
/while_loop_with_no._of_printing.py
|
4c9ad17812c100ee4f194b559463b79e50e5d2dd
|
[] |
no_license
|
sahilkumar171193/mycodes
|
36082a07b11ddb35ad037e704b1d210d9a41dcdc
|
9e86eeb544b68c070f292659a336732e8c899234
|
refs/heads/main
| 2023-02-17T03:29:33.840130
| 2021-01-14T16:22:57
| 2021-01-14T16:22:57
| 329,668,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
i = 1
while i <= 5:
    print("hello sahil", i)
    i += 1
print("everything printed with its appearance number")
|
[
"noreply@github.com"
] |
noreply@github.com
|
e44f0c91aacf007f995d8c725da557c967847454
|
1a8ade846000a43d64dbc8862991c15080798332
|
/env/bin/epylint
|
dbc2149c18d9613e1995a5d193c58cde66938cbf
|
[] |
no_license
|
felipesantos10/CoronaBot
|
355bee16d7c385920c90b4da69df962e9dfd5427
|
a71e014b790904a58d1916f5ac0766a7caa1848b
|
refs/heads/main
| 2023-01-04T20:43:08.873716
| 2020-10-20T22:01:33
| 2020-10-20T22:01:33
| 305,843,103
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
#!/home/felipe/Documentos/coronabot/env/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_epylint())
|
[
"felipedev.ti@gmail.com"
] |
felipedev.ti@gmail.com
|
|
f516a8cbb18cf9b4a876221ca2d3d975e36ecc3c
|
c807ab961d0dea5eb3936547e843673247639a07
|
/Personel/pooja/Python/Assignment - 11/consecutive_duplicates.py
|
31b412c923c633117707c634e3440c5207c770e4
|
[] |
no_license
|
shankar7791/MI-11-DevOps
|
dbac94ca1fb4627ae44658701bcddcd22c65a3d4
|
63a0a65b05192439575ed2c47a6c3d33c5be87d2
|
refs/heads/main
| 2023-07-12T20:33:30.121801
| 2021-08-13T03:01:17
| 2021-08-13T03:01:17
| 355,145,424
| 0
| 4
| null | 2021-08-12T19:31:44
| 2021-04-06T10:19:10
|
Python
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
from itertools import groupby
def remove_all_consecutive(str1):
    # groupby yields one (key, run) pair per run of equal adjacent
    # characters; keeping only the keys collapses each run to one character
    result_str = []
    for key, _group in groupby(str1):
        result_str.append(key)
    return ''.join(result_str)
str1 = input("Enter string: ")
print("Original string: " + str1)
print("After removing consecutive duplicates:")
print(remove_all_consecutive(str1))
|
[
"poojanandura@gmail.com"
] |
poojanandura@gmail.com
|
e3f4e1bc264e4e9e928ef3ebb533de57033f0c84
|
600df3590cce1fe49b9a96e9ca5b5242884a2a70
|
/tools/perf/measurements/power.py
|
58551ae3207e2de8e876bea951fc32323d5b63c9
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"MIT"
] |
permissive
|
metux/chromium-suckless
|
efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a
|
72a05af97787001756bae2511b7985e61498c965
|
refs/heads/orig
| 2022-12-04T23:53:58.681218
| 2017-04-30T10:59:06
| 2017-04-30T23:35:58
| 89,884,931
| 5
| 3
|
BSD-3-Clause
| 2022-11-23T20:52:53
| 2017-05-01T00:09:08
| null |
UTF-8
|
Python
| false
| false
| 1,961
|
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from metrics import network
from metrics import power
from telemetry.core import util
from telemetry.page import legacy_page_test
class Power(legacy_page_test.LegacyPageTest):
"""Measures power draw and idle wakeups during the page's interactions."""
def __init__(self):
super(Power, self).__init__()
self._power_metric = None
self._network_metric = None
def WillStartBrowser(self, platform):
self._power_metric = power.PowerMetric(platform)
self._network_metric = network.NetworkMetric(platform)
def WillNavigateToPage(self, page, tab):
self._network_metric.Start(page, tab)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
self._network_metric.Stop(page, tab)
self._power_metric.Stop(page, tab)
self._network_metric.AddResults(tab, results)
self._power_metric.AddResults(tab, results)
def DidRunPage(self, platform):
del platform # unused
self._power_metric.Close()
class LoadPower(Power):
def WillNavigateToPage(self, page, tab):
self._network_metric.Start(page, tab)
self._power_metric.Start(page, tab)
def DidNavigateToPage(self, page, tab):
pass
class QuiescentPower(legacy_page_test.LegacyPageTest):
"""Measures power draw and idle wakeups after the page finished loading."""
# Amount of time to measure, in seconds.
SAMPLE_TIME = 30
def ValidateAndMeasurePage(self, page, tab, results):
if not tab.browser.platform.CanMonitorPower():
return
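    # Wait up to 60 seconds for the page to reach quiescence before sampling power.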
util.WaitFor(tab.HasReachedQuiescence, 60)
metric = power.PowerMetric(tab.browser.platform)
metric.Start(page, tab)
time.sleep(QuiescentPower.SAMPLE_TIME)
metric.Stop(page, tab)
metric.AddResults(tab, results)
|
[
"enrico.weigelt@gr13.net"
] |
enrico.weigelt@gr13.net
|
721bf55ac66a7be5ecaef7be3efb4eea3d553e7f
|
5656be0eb8dcd787f78f5ce47a735c74f3359b30
|
/export.py
|
4a985e9975b59272330882ba36d7d8ee074b9996
|
[
"MIT"
] |
permissive
|
mgornik/PEAN
|
875c3bd2fb12ad88152f6dea21abcf6a73b7674c
|
bab1d11b4485b8a68edfe78018d0055816d6c999
|
refs/heads/master
| 2020-09-09T19:26:40.869432
| 2019-12-03T09:25:43
| 2019-12-03T09:25:43
| 221,542,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,054
|
py
|
# -*- coding: utf-8 -*-
import util
from os import path
from subprocess import call
from assignment_status import *
from lxml import etree
from lxml import objectify
def execute_export_command(config, criteria, stations):
"""
Izvršavanje komande koja vrši izvoz rezultata u format koji se može lako uvesti u Evidenciju
config - globalna konfiguracija alata za pregled
criteria - kriterijum pregleda zadatka (bodovanje, način izvršavanja itd.)
stations - kolekcija računara i studenata koji su radili zadatak (ključ - oznaka računara, podatak - lista - broj
indeksa i ime/prezime studenta)
"""
error_message = 'Interna greška: format XML fajla nije validan!\nFajl čije parsiranje nije uspelo: {0}'\
.format(config.FINAL_REPORT_FILENAME)
if not(path.isfile(config.FINAL_REPORT_FILENAME)):
util.fatal_error('Ne može se izvršiti izvoz podataka pošto fajl sa izveštajem još uvek ne postoji!')
else:
with open(config.FINAL_REPORT_FILENAME) as f:
xml = f.read()
root = objectify.fromstring(xml)
        # Check two conditions:
        # 1) Have all submissions been graded?
        # 2) Are there any skipped submissions in the report?
        # If either condition holds, the results cannot be exported:
done_stations = {}
for child in root.getchildren():
if child.tag != 'assignment':
util.fatal_error(error_message)
if child['status'] == ASSIGNMENT_STATUS_SKIPPED:
util.fatal_error('Ne može se izvršiti izvoz rezultata jer postoje preskočeni radovi!\n'
+ 'Molim da ocenite ove radove pa pokušate izvoz ponovo.')
done_stations[child.attrib['station']] = 1
if set(stations) != set(done_stations):
util.fatal_error('Ne može se izvršiti izvoz rezultata jer nisu svi radovi ocenjeni!\n'
+ 'Molim da ocenite ove radove pa pokušate izvoz ponovo.')
        try:
            criteria.total_points  # check whether the attribute is defined
        except AttributeError:
            criteria.total_points = 100
total_points_f = float(criteria.total_points)
        with open(config.EXPORTED_REPORT_FILENAME, 'w') as wfile:
            # write the header row to the CSV file:
            wfile.write('indeks,ime,prezime,poeni,ukupno_poena,ip,datum\n')
for child in root.getchildren():
if child.tag != 'assignment':
util.fatal_error(error_message)
indeks = child['id']
naziv = child['name'].text
razmak = naziv.find(' ')
                # split the full name into first and last name:
if razmak == -1:
ime = naziv
prezime = ''
else:
ime = naziv[:razmak]
prezime = naziv[razmak+1:]
final_score = float(child['final-pct'])
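                # scale the percentage score to the assignment's point total and round to a whole number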
poeni = int(round(final_score * (total_points_f / 100.0), 0))
wfile.write('"{0}","{1}","{2}",{3},{4},,\n'.format(indeks, ime, prezime, poeni, criteria.total_points))
command = config.COMPRESS_REPORT_COMMAND
ret = call(command, shell=True)
if ret != 0:
util.fatal_error('''Pokretanje alata za komprimovanje CSV izveštaja u ZIP arhivu nije uspelo!
Komanda koja je pokrenuta:\n{0}'''.format(command))
print 'Završen je izvoz podataka. Arhiva {0} sadrži rezultate pregleda.'.format(config.EXPORTED_ARCHIVE_FILENAME)
|
[
"noreply@github.com"
] |
noreply@github.com
|