| column | type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 2-616) |
| content_id | string (length 40) |
| detected_licenses | list (length 0-69) |
| license_type | string (2 classes) |
| repo_name | string (length 5-118) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (length 4-63) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k-686M, nullable) |
| star_events_count | int64 (0-209k) |
| fork_events_count | int64 (0-110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (213 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (2-10.3M) |
| extension | string (246 classes) |
| content | string (length 2-10.3M) |
| authors | list (length 1) |
| author_id | string (length 0-212) |
blob_id: 818657e277840e11174c5f6d239c39a9b39f8b59 | directory_id: d445862efb3d2970184e9cf2c6ed5ffa3fbc5b16 | content_id: f1bf45e144db3bf9bced5f19d6a0d7bfc43763bc
path: /flaskblog/models.py | repo_name: abhinavk001/Flaskblog | branch_name: refs/heads/main
snapshot_id: 01297ccc810045461200f118881ed9599206936d | revision_id: 09fbc1f0429ea6c7e84a5a23c0cfd0025ffd4491
visit_date: 2023-01-12T15:38:04.594525 | revision_date: 2020-11-21T05:09:50 | committer_date: 2020-11-21T05:09:50
github_id: 313,699,034 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2020-11-21T05:09:51 | gha_created_at: 2020-11-17T17:51:21 | gha_language: HTML
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,778 | extension: py
content:
from datetime import datetime
import pytz
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flaskblog import db, login_manager
from flask_login import UserMixin


@login_manager.user_loader
def load_user(user_id):
    return User.query.get(int(user_id))


class User(db.Model, UserMixin):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    password = db.Column(db.String(60), nullable=False)
    posts = db.relationship('Post', backref='author', lazy=True)

    def get_reset_token(self, expires_sec=1800):
        s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        except Exception:  # expired or tampered token
            return None
        return User.query.get(user_id)

    def __repr__(self):
        return f"User('{self.username}', '{self.email}', '{self.image_file}')"


class Post(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    tz = pytz.timezone('Asia/Calcutta')
    # Pass a callable so the timestamp is computed per insert;
    # the original evaluated it once at import time.
    date_posted = db.Column(db.DateTime, nullable=False,
                            default=lambda: pytz.utc.localize(datetime.utcnow(), is_dst=None).astimezone(Post.tz))
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        return f"Post('{self.title}', '{self.date_posted}')"
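The two token helpers pair up: `get_reset_token` signs `{'user_id': ...}` with the app's `SECRET_KEY`, and `verify_reset_token` round-trips it, returning `None` on an expired or tampered token. A minimal usage sketch (not part of the record; assumes an active application context and an existing `user` row):

```python
token = user.get_reset_token(expires_sec=600)  # signed, time-limited payload carrying user_id
same_user = User.verify_reset_token(token)     # the User instance, or None if invalid/expired
```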
authors: ["godofwars0017@gmail.com"] | author_id: godofwars0017@gmail.com

blob_id: e96a934f3c2c33f4107aa1d4e1c91107fa3083ee | directory_id: 10d17864a685c025bb77959545f74b797f1d6077 | content_id: 958b1c899df533fdaa5051df081ab12640d19d01
path: /capitulo 09/09.39.py | repo_name: jcicerof/IntroducaoPython | branch_name: refs/heads/master
snapshot_id: 02178d2dfcaa014587edbd3090c517089ccef7c2 | revision_id: 02e619c7c17e74acdc3268fbfae9ab624a3601dd
visit_date: 2020-04-24T18:12:21.422079 | revision_date: 2019-02-23T05:14:43 | committer_date: 2019-02-23T05:14:43
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 577 | extension: py
content:
##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Publisher: Editora Novatec (c) 2010-2019
# First edition - November/2010 - ISBN 978-85-7522-250-8
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Third edition - January/2019 - ISBN 978-85-7522-718-3
# Site: http://python.nilo.pro.br/
#
# File: listagem3\capítulo 09\09.39.py
# Description:
##############################################################################
import datetime

momento = datetime.datetime.now()  # not in the original fragment; added so the listing runs
momento.weekday()                  # Monday == 0 ... Sunday == 6
authors: ["jose.cicero@gmail.com"] | author_id: jose.cicero@gmail.com

blob_id: 3d65d74d70ae98884bedfe6b4893adc504e45293 | directory_id: b0dec04a6d705d5f61a3ec0793c613745871db28 | content_id: 651b092909ff9d3b56a69107df5fd232b30325fa
path: /PythonTestScripts/venv/Scripts/easy_install-script.py | repo_name: VeyselKayaturk/DataBoss | branch_name: refs/heads/master
snapshot_id: abedf19d35e9bacabc06bec042b9b61ff8d6e8b3 | revision_id: 948a915ddd85b46129d427bbf3854a20b788d6ee
visit_date: 2023-02-28T11:32:19.346033 | revision_date: 2021-01-31T18:32:52 | committer_date: 2021-01-31T18:32:52
github_id: 334,718,825 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 436 | extension: py
content:
#!C:\Users\veysel\DataBoss\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
    )
authors: ["veysel_kayaturk@hotmail.com"] | author_id: veysel_kayaturk@hotmail.com

blob_id: 0608037a4fd1450b915c965d8feff2eace74b368 | directory_id: 89bc3d20aca14a04bcb40cf756df5291299ed901 | content_id: dc7650d6a74c40257ef40b284fafec84c0d18399
path: /event_handler.py | repo_name: Igluminati/WeObserve | branch_name: refs/heads/main
snapshot_id: a986c054d9c069fddb78c40412c1cea4dba52577 | revision_id: 4e6a0d56b00c4a8e95e140d33a13f3cfe88b282b
visit_date: 2023-06-14T13:23:43.486375 | revision_date: 2021-07-08T10:40:16 | committer_date: 2021-07-08T10:40:16
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,175 | extension: py
content:
import watchdog.events, watchdog.observers, os.path, time, socket, datetime

hostname = socket.gethostname()
IPAddr = socket.gethostbyname(hostname)


# Handles Events:
class Events(watchdog.events.PatternMatchingEventHandler):
    # Editor swap-file endings to ignore. The original compared
    # event.src_path[-3:], which can never equal the 4-char "swpx";
    # the full extension is compared instead.
    fucky_endings = {"swp", "swx", "swpx"}

    def __init__(self):
        # Search for a specific file extension (in this case it searches for all)
        watchdog.events.PatternMatchingEventHandler.__init__(
            self, patterns=['*'], ignore_directories=True, case_sensitive=False)

    # File creation event:
    def on_created(self, event):
        if event.src_path.rsplit('.', 1)[-1] in self.fucky_endings:
            return
        c_notification = "Received creation event - %s - %s - %s" % (
            event.src_path, time.ctime(os.path.getctime(event.src_path)), IPAddr)
        # print(c_notification)  # Prints immediately
        with open("output.txt", "a") as log:  # close the handle instead of leaking it
            print(c_notification, file=log)

    # File modification event:
    def on_modified(self, event):
        if event.src_path.rsplit('.', 1)[-1] in self.fucky_endings:
            return
        m_notification = "Received modification event - %s - %s - %s" % (
            event.src_path, time.ctime(os.path.getmtime(event.src_path)), IPAddr)
        # print(m_notification)
        with open("output.txt", "a") as log:
            print(m_notification, file=log)

    # File deletion event:
    def on_deleted(self, event):
        if event.src_path.rsplit('.', 1)[-1] in self.fucky_endings:
            return
        d_notification = "File/Directory was deleted - %s - %s - %s" % (
            event.src_path, datetime.datetime.now(), IPAddr)
        # print(d_notification)
        with open("output.txt", "a") as log:
            print(d_notification, file=log)


if __name__ == "__main__":
    src_path = "test_directory"  # /path/to/directory
    event_handler = Events()
    observer = watchdog.observers.Observer()
    observer.schedule(event_handler, path=src_path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
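`PatternMatchingEventHandler` can also do the swap-file filtering declaratively, which would let the `on_*` callbacks drop their suffix checks. A small sketch against the same watchdog API (the ignore patterns are assumptions mirroring `fucky_endings`):

```python
import watchdog.events

# watchdog discards matching paths before the callbacks fire
handler = watchdog.events.PatternMatchingEventHandler(
    patterns=['*'],
    ignore_patterns=['*.swp', '*.swx', '*.swpx'],
    ignore_directories=True,
    case_sensitive=False,
)
```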
authors: ["noreply@github.com"] | author_id: Igluminati.noreply@github.com

blob_id: b2559814942217b99314c72521b236d547d3f899 | directory_id: 9743d5fd24822f79c156ad112229e25adb9ed6f6 | content_id: 77549569cc24bdea8347e94d79e7aa93a485c531
path: /xai/brain/wordbase/otherforms/_symbolising.py | repo_name: cash2one/xai | branch_name: refs/heads/master
snapshot_id: de7adad1758f50dd6786bf0111e71a903f039b64 | revision_id: e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
visit_date: 2021-01-19T12:33:54.964379 | revision_date: 2017-01-28T02:00:50 | committer_date: 2017-01-28T02:00:50
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: ["MIT"] | license_type: permissive | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 236 | extension: py
content:
# class header
class _SYMBOLISING():
    def __init__(self):
        self.name = "SYMBOLISING"
        self.definitions = ['symbolise']  # the original assigned the bare name `symbolise`, which is undefined
        self.parents = []
        self.childen = []  # sic in the original
        self.properties = []
        self.jsondata = {}
        self.basic = ['symbolise']
authors: ["xingwang1991@gmail.com"] | author_id: xingwang1991@gmail.com

blob_id: efffd98635cae516be11bfc3ea1fd3069065a97f | directory_id: c7787d7e2517cc909d0514e8f45ff96505b40eb0 | content_id: 4834ed2dbc58c6eb8390658526aa6ce2e039812d
path: /Deep Neural Network/nn/optimizer.py | repo_name: zhoubaohang/deep-learning-notes | branch_name: refs/heads/master
snapshot_id: 5037b6a91e83dba2856b7aaee2e9162bac967dad | revision_id: d912a0dff206f3ff689a8e9ce59aa20f50e87000
visit_date: 2020-03-28T05:31:55.630779 | revision_date: 2018-11-09T14:33:35 | committer_date: 2018-11-09T14:33:35
github_id: 147,782,194 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,850 | extension: py
content:
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 27 18:34:19 2018

@author: 周宝航
"""
import numpy as np


class Momentem(object):  # sic: momentum optimizer

    def __init__(self, shape_vw, shape_vb, beta=0.9):
        self.beta = beta
        self.vw = np.zeros(shape_vw)
        self.vb = np.zeros(shape_vb)

    def getWeight(self, dW, db, t=0):
        # Exponentially weighted moving average of the gradients.
        self.vw = self.beta * self.vw + (1. - self.beta) * dW
        self.vb = self.beta * self.vb + (1. - self.beta) * db
        return (self.vw, self.vb)


class RMSProp(object):

    def __init__(self, shape_sw, shape_sb, beta=0.999):
        self.beta = beta
        self.sw = np.zeros(shape_sw)
        self.sb = np.zeros(shape_sb)

    def getWeight(self, dW, db, t=0):
        # Moving average of the squared gradients, used to normalise the step.
        self.sw = self.beta * self.sw + (1. - self.beta) * dW**2
        self.sb = self.beta * self.sb + (1. - self.beta) * db**2
        return (dW / np.sqrt(self.sw + 1e-6), db / np.sqrt(self.sb + 1e-6))


class Adam(object):

    def __init__(self, shape_w, shape_b, beta1=0.9, beta2=0.999, epsilon=1e-8):
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.momentom = Momentem(shape_w, shape_b, beta1)
        self.rmsprop = RMSProp(shape_w, shape_b, beta2)

    def getWeight(self, dW, db, t):
        vw, vb = self.momentom.getWeight(dW, db)
        # RMSProp returns the normalised gradient, so the squared average is
        # recovered from it (up to the 1e-6 stabiliser) before bias correction.
        u_sw, u_sb = self.rmsprop.getWeight(dW, db)
        sw, sb = (dW / u_sw)**2, (db / u_sb)**2
        # Bias correction for the zero-initialised moving averages.
        vw_correct = vw / (1. - self.beta1**t)
        vb_correct = vb / (1. - self.beta1**t)
        sw_correct = sw / (1. - self.beta2**t)
        sb_correct = sb / (1. - self.beta2**t)
        update_w = vw_correct / np.sqrt(sw_correct + self.epsilon)
        update_b = vb_correct / np.sqrt(sb_correct + self.epsilon)
        return (update_w, update_b)
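Written out, the update the `Adam` class computes is the usual bias-corrected form (the `1e-6` stabiliser inside `RMSProp` aside); with $g_t$ the gradient and $v_t$, $s_t$ the two moving averages at step $t$:

$$
v_t = \beta_1 v_{t-1} + (1-\beta_1)\,g_t, \qquad
s_t = \beta_2 s_{t-1} + (1-\beta_2)\,g_t^2, \qquad
\Delta\theta_t = \frac{v_t/(1-\beta_1^t)}{\sqrt{s_t/(1-\beta_2^t)+\epsilon}}
$$

The returned `update_w`/`update_b` are exactly $\Delta\theta_t$; the learning rate is presumably applied by the caller.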
authors: ["zbh12306@163.com"] | author_id: zbh12306@163.com

blob_id: 92babcab54a5514ed755293c6bbcce524d10149b | directory_id: db3551a44acfcec1eaee6bbbc35efad594d1496c | content_id: 2a2612d8ae1f942ec628d77e4d275049176f1b33
path: /data/es_data_processing.py | repo_name: chesiver/CS6400_Travel_Recommend | branch_name: refs/heads/master
snapshot_id: 859febfb5cfd54e63ff34ea89f4d53654dbe7742 | revision_id: ce6d11c080dc1edc8db79c3abe45f289e894d47d
visit_date: 2021-05-15T19:00:17.248979 | revision_date: 2017-12-07T18:51:40 | committer_date: 2017-12-07T18:51:40
github_id: 107,738,694 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2017-12-07T18:51:41 | gha_created_at: 2017-10-21T00:24:31 | gha_language: HTML
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,962 | extension: py
content:
import pandas as pd

data = pd.read_csv('data/1.csv')
data['id'] = data.index
data = data[['id', 'name', 'city', 'country', 'intro']]

# to_json returns str, so the files are opened in text mode
# (the original used mode="wb", which raises TypeError under Python 3).
filename = "destinations.data"
with open(filename, mode="w") as outfile:
    outfile.write(data.to_json(orient='records', lines=True))

cities = data['city'].drop_duplicates()
cities.index = pd.RangeIndex(len(cities.index))
cities = cities.to_frame()
cities['id'] = cities.index
cities = cities[['id', 'city']]
filename = "cities.data"
with open(filename, mode="w") as outfile:
    outfile.write(cities.to_json(orient='records', lines=True))

countries = data['country'].drop_duplicates()
filename = "countries.data"
with open(filename, mode="w") as outfile:
    for country in countries:
        outfile.write("{\"country\":" + "\"" + country + "\"" + "}\n")

countries = data['country'].drop_duplicates()
countries.index = pd.RangeIndex(len(countries.index))
countries = countries.to_frame()
countries['id'] = countries.index
countries = countries[['id', 'country']]
filename = "countries.data"
with open(filename, mode="w") as outfile:
    outfile.write(countries.to_json(orient='records', lines=True))

# Interleave a bulk-API action line before every record. out_path is renamed
# from `outfile`, which the original reused for both the name and the handle.
out_path = "travel.data"
dest_index = "{\"index\": {\"_index\": \"travelsearch\", \"_type\": \"destination\"}}" + "\n"
city_index = "{\"index\": {\"_index\": \"travelsearch\", \"_type\": \"cities\"}}" + "\n"
country_index = "{\"index\": {\"_index\": \"travelsearch\", \"_type\": \"countries\"}}" + "\n"
with open(out_path, mode="w") as outfile:
    with open('destinations.data', mode="r") as infile:
        for line in infile:
            outfile.write(dest_index)
            outfile.write(line)
    with open('cities.data', mode="r") as infile:
        for line in infile:
            outfile.write(city_index)
            outfile.write(line)
    with open('countries.data', mode="r") as infile:
        for line in infile:
            outfile.write(country_index)
            outfile.write(line)
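The interleaving produces the action/document line pairs that Elasticsearch's bulk API expects; the first two lines of `travel.data` would look roughly like this (field values are illustrative, not from the source CSV):

```
{"index": {"_index": "travelsearch", "_type": "destination"}}
{"id":0,"name":"...","city":"...","country":"...","intro":"..."}
```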
authors: ["chli4250@uni.sydney.edu.au"] | author_id: chli4250@uni.sydney.edu.au

blob_id: 90c5ddfa0aa0c5bc388b6a212b9985fad2221620 | directory_id: 4f837af7475b627f354118d13fec8dbff834abd4 | content_id: 401f00ea71eeaf549a0f26c3ddef47094e0d93c1
path: /wechat_spider/history_articles_spider.py | repo_name: DevRoss/spider | branch_name: refs/heads/master
snapshot_id: 7396149265faa164234d21ea25c89d088ac4544b | revision_id: d4f52695ca838035ecd0cd002e27f9c81d7d4846
visit_date: 2020-04-05T11:22:44.407261 | revision_date: 2017-08-10T03:53:04 | committer_date: 2017-08-10T03:53:04
github_id: 81,425,612 | star_events_count: 4 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,016 | extension: py
content:
import requests
from bs4 import BeautifulSoup
import re
import json

params = {
    'SCAU': {
        'biz': 'MzIyMjEwMjYxMA',
        'uin': 'MjIxMDM2OTM2MQ%3D%3D',
'key': 'bfaceaac636432aaf7364292606c0d1679d45c845fee535185129bdd102d4e3fd3888d978c9d1469deab7d1afc7becd835ba69e50551595eefca33ea38e7851c341b3344d6cefbbb523ac3a5feb7f76b',
'scene': '124&devicetype',
'pass_ticket': 'HL%2BQHOEcvwQGdCCyp6oFf40BX8gcZdIENpHCDQX1w361iJ%2FBsnIH7meE%2BHiXJ4w7'
}
}
'''
Pass the name of the target official account via `account` when instantiating.
'''
class Article:
    def __init__(self, account=None):
        self.biz = None
        self.uin = None
        self.key = None
        if account is not None:
            self.biz = params[account]['biz']
            self.uin = params[account]['uin']
            self.key = params[account]['key']
            self.scene = params[account]['scene']
            self.pass_ticket = params[account]['pass_ticket']
        self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,en-US;q=0.8',
'Upgrade-Insecure-Requests': '1',
'Connection': 'keep-alive',
'Cookie': 'pgv_pvi=9787430912; pgv_pvid=5941961831; sd_userid=99401491106713589; sd_cookie_crttime=1491106713589; pgv_info=ssid=s4976549360; wap_sid=CNGe/p0IEkB3dERaYm8xZU96MXhxaF9BeTRVb1czZjg5dHdPMUhpWEt2RFpxUWljSjJ2cUxTcHNsay1zeVBYRlk4Xy1ZS3UxGAQg/REo0sS1gAww/bXSxwU=; wap_sid2=CNGe/p0IElxVTVRsQ1RnVXpabjduUjFKdkY0TWUxaHpGNENaMWdYMFBHRmhpZXRyejFpMkhJNERDbGpmZzUwQVNCWVBKaXBjUjNTTjB2UjBTQmh4MnJMRTFudUNTSVFEQUFBfjD9tdLHBQ==',
'Host': 'mp.weixin.qq.com',
'X-Requested-With': 'com.tencent.mm',
'Accept-Encoding': 'gzip, deflate',
'User-Agent': 'Mozilla/5.0 (Linux; Android 7.1.1; Nexus 5 Build/NOF27B; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 Mobile Safari/537.36 MicroMessenger/6.5.7.1041 NetType/WIFI Language/zh_CN',
# 'x-wechat-key': '926cd28a4aba000edb9f388d9df05262445a1683abb94e0b9f8589716d739d8ecb2aa758ee89e8268e2583fb6f8ec6e0c56e4620fc0e200adcf07c41e1f1fcfb5354e3b291a55a06cc0b09f4a335fe29',
# 'x-wechat-uin': 'MjIxMDM2OTM2MQ%3D%3D'
}
        self.raw_url = 'https://mp.weixin.qq.com/mp/getmasssendmsg?__biz={biz}==&from=1&uin={uin}&key={key}'
        self.raw_history_url = 'https://mp.weixin.qq.com/mp/profile_ext?action=home&__biz={biz}==&scene={scene}=android-25&version=26050741&lang=zh_CN&nettype=WIFI&a8scene=3&pass_ticket={pass_ticket}&wx_header=1'
        self.url = None
        self.session = requests.session()
        self.session.headers.update(self.headers)

    def entrance(self):
        # compose the URL
        self.url = self.raw_history_url.format(biz=self.biz, uin=self.uin, key=self.key, scene=self.scene,
                                               pass_ticket=self.pass_ticket)
        print(self.url)
        res = self.session.get(url=self.url)
        # res.encoding = 'utf-8'
        # A page titled 验证 ("verification") means the captured key/cookie have gone stale.
        error = re.compile('<title>验证</title>')
        print(res.text)
        if re.search(error, res.text) is not None:
            print('URL is no longer valid')
        msg_list = re.search(r"msgList = '(?P<json>.+)';", res.text)
        print('------------------------------------------------------\n\n\n\n\n')
        print(msg_list.group('json'))
        # article_dict = json.loads(msg_list.group('json'))
        # article_collection = []
        # for article in article_dict['list']:
        #     article_detail = {}
        #     article_detail['content_url'] = article['app_msg_ext_info']['content_url']
        #     article_detail['img'] = article['app_msg_ext_info']['cover']
        #     article_detail['title'] = article['app_msg_ext_info']['title']
        #     for key, value in article_detail.items():
        #         print(key)
        #         print(value)


if __name__ == '__main__':
    starter = Article(account='SCAU')
    starter.entrance()
authors: ["820584458@qq.com"] | author_id: 820584458@qq.com

blob_id: 3e14063ca00f455ff9db9ed329d5f1846980859c | directory_id: fd87d932a9089a6a6855ae28a1d46d558349f40d | content_id: 300be0b5fa1d99c643ffc010a634371786349182
path: /pym/scripts/pym.py | repo_name: dmdm/Parenchym | branch_name: refs/heads/master
snapshot_id: c66b83d330495891002c09e4c0dae78ebf3c0d1f | revision_id: 63864cdaff76b9aa1b8dbe795eb537b5be5add3a
visit_date: 2020-04-06T15:32:28.762265 | revision_date: 2014-12-18T22:58:17 | committer_date: 2014-12-18T22:58:17
github_id: 22,520,527 | star_events_count: 0 | fork_events_count: 2
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 13,327 | extension: py
content:
# -*- coding: utf-8 -*-
"""
``pym`` has several sub-commands to manage your Pym setup: you can
manage principals (users), roles and role memberships as well as install new
sites and check the integrity of existing sites.
The sub-commands are::
list-principals List principals
create-principal Create principal
update-principal Update principal with given ID
delete-principal Delete principal with given ID
list-roles List roles
create-role Create role
update-role Update role with given ID
delete-role Delete role with given ID
list-rolemembers List rolemembers
create-rolemember Create rolemember
delete-rolemember Delete rolemember with given ID
Type ``pym -h`` for general help and a list of the sub-commands,
``pym sub-command -h`` to get help for that sub-command.
``pym`` allows you to use different formats for input and output.
Choices are json, yaml (default) and tsv.
Tsv is handy if you want to review the output in a spreadsheet::
pym -c production.ini --format tsv list-principals > a && gnumeric a
Both, json and yaml allow inline-style. Here is an example of inline YAML::
pym -c production.ini --format yaml create-principal \\
'{principal: FOO5, email: foo5@here, pwd: FOO, roles: [foo, bar]}'
TTY? True
Locale? en_GB UTF-8
id: 106
display_name: FOO5
email: foo5@here
first_name: null
csrf_token: null
identity_url: null
is_blocked: false
is_enabled: false
last_name: null
login_time: null
notes: null
prev_login_time: null
principal: FOO5
pwd: FOO
owner: 2
ctime: '2012-12-07 07:47:23'
editor: null
mtime: null
role_names:
- foo
- bar
- users
Done.
Here is an example of creating a new site::
pym -c production.ini --format yaml create-site '{sitename: www.new-site.com, principal: {principal: sally, email: sally@example.com, pwd: FOO, first_name: Sally, last_name: Müller-Lüdenscheidt, roles: [some_role, other_role]}, title: Neue Site, site_template: default}'
TTY? True
Locale? en_GB UTF-8
Proceed to create a site in /tmp/sites (yes/NO)? yes
Copied template [...]/var/site-templates/default
Created role 'www.new-site.com' (108)
Created principal 'sally' (111)
Set principal 'sally' as member of role 'www.new-site.com'
Done.
To get an overview, which user is in which role, and if there are orphans (should not),
do::
pym -c production.ini --format tsv list-rolemembers > a && gnumeric a
"""
import logging
import os
import sys
import transaction
import argparse
import yaml
import time
from collections import OrderedDict
import datetime
import pym.models
import pym.lib
import pym.cli
import pym.auth.manager as authmgr
import pym.auth.const
# Init YAML to dump an OrderedDict like a regular dict, i.e.
# without creating a specific object tag.
def _represent_ordereddict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', data.items())
yaml.add_representer(OrderedDict, _represent_ordereddict)
class PymCli(pym.cli.Cli):
def __init__(self):
super().__init__()
def list_users(self):
from pym.auth.models import User
qry = self._build_query(User)
data = self._db_data_to_list(qry,
fkmaps=dict(groups=lambda it: it.name))
self._print(data)
def list_users_with_groups(self):
from pym.auth.models import User
#qry = self._build_query(User)
sess = pym.models.DbSession()
users = sess.query(User)
data = {}
for u in users:
k = "u:{} ({})".format(u.principal, u.id)
groups = []
for gr in u.groups:
groups.append("g:{} ({})".format(gr.name, gr.id))
data[k] = groups
self._print(data)
def create_user(self):
data = self._parse(self.args.data)
data['owner'] = pym.auth.const.ROOT_UID
rs = authmgr.create_user(data)
self._print(self._db_data_to_list([rs],
fkmaps=dict(group_names=lambda it: it))[0])
def update_user(self):
data = self._parse(self.args.data)
data['editor'] = pym.auth.const.ROOT_UID
data['mtime'] = datetime.datetime.now()
rs = authmgr.update_user(data)
self._print(self._db_data_to_list([rs])[0])
def delete_user(self):
authmgr.delete_user(self.args.id)
def list_groups(self):
from pym.auth.models import Group
qry = self._build_query(Group)
data = self._db_data_to_list(qry)
self._print(data)
def list_groups_with_members(self):
from pym.auth.models import Group
#groups = self._build_query(Group)
sess = pym.models.DbSession()
groups = sess.query(Group)
data = {}
for gr in groups:
k = 'g:{} ({})'.format(gr.name, gr.id)
member_users = []
for mu in gr.member_users:
if not mu:
continue
member_users.append('{} ({})'.format(mu.principal, mu.id))
member_groups = []
for mg in gr.member_groups:
if not mg:
continue
member_groups.append('{} ({})'.format(mg.name, mg.id))
data[k] = {
'u': member_users,
'g': member_groups
}
self._print(data)
def create_group(self):
data = self._parse(self.args.data)
data['owner'] = pym.auth.const.ROOT_UID
rs = authmgr.create_group(data)
self._print(self._db_data_to_list([rs])[0])
def update_group(self):
data = self._parse(self.args.data)
data['editor'] = pym.auth.const.ROOT_UID
data['mtime'] = datetime.datetime.now()
rs = authmgr.update_group(data)
self._print(self._db_data_to_list([rs])[0])
def delete_group(self):
authmgr.delete_group(self.args.id)
def list_group_members(self):
pass
# from pym.auth.models import User, Group, GroupMember
# groups = self._build_query(Group)
# fields = ['id', 'group_id', 'group', 'user_id', 'principal',
# 'is_enabled', 'is_blocked', 'owner', 'ctime']
# data = {}
# for gr in groups:
# grdat = {
# 'user'
# }
# self._print(data)
def create_group_member(self):
data = self._parse(self.args.data)
data['owner'] = pym.auth.const.ROOT_UID
rs = authmgr.create_group_member(data)
self._print(self._db_data_to_list([rs])[0])
def delete_group_member(self):
authmgr.delete_group_member(self.args.id)
def _build_query(self, entity):
sess = pym.models.DbSession()
if isinstance(entity, list):
entities = entity
entity = entities[0]
else:
entities = [entity]
qry = sess.query(entities)
if self.args.idlist:
qry = qry.filter(entity.id.in_(self.args.idlist))
else:
if self.args.filter:
qry = qry.filter(self.args.filter)
if self.args.order:
qry = qry.order_by(self.args.order)
return qry
def parse_args(app_class, runner):
# Main parser
parser = argparse.ArgumentParser(description="""Pym command-line
interface.""",
epilog="""
Samples:
pym -c production.ini --format tsv list-group-members --order
'group_name, principal_principal' > /tmp/a.txt && gnumeric /tmp/a.txt
""")
app_class.add_parser_args(parser, (
('config', True),
('locale', False),
('format', False),
('alembic-config', False)
))
parser.add_argument('--dry-run', action="store_true",
help="The database changes will be rolled back.")
subparsers = parser.add_subparsers(title="Commands", dest="subparser_name",
help="""Type 'pym COMMAND --help'""")
# Parent parser for DB editing
parser_db_edit = argparse.ArgumentParser(description="Database editing",
add_help=False)
parser_db_edit.add_argument('data',
help="The data. For updates, field ID must be present.")
# Parent parser for DB deleting
parser_db_delete = argparse.ArgumentParser(description="Database deleting",
add_help=False)
parser_db_delete.add_argument('id', type=int,
help="The ID")
# Parent parser for DB listers
parser_db_lister = argparse.ArgumentParser(description="Database lister",
add_help=False)
parser_db_lister.add_argument('idlist', nargs='*', type=int, metavar='ID',
help="""Filter by these IDs""")
parser_db_lister.add_argument('--filter',
help="""Define filter with literal SQL (WHERE clause, e.g. 'id between
200 and 300')""")
parser_db_lister.add_argument('--order',
help="""Define sort order with literal SQL (ORDER BY clause, e.g. 'name
DESC')""")
# Parser cmd list-users
parser_list_users = subparsers.add_parser('list-users',
parents=[parser_db_lister],
help="List users")
parser_list_users.set_defaults(func=runner.list_users)
parser_list_users_with_groups = subparsers.add_parser('list-users-with-groups',
parents=[parser_db_lister],
help="List users with their groups.")
parser_list_users_with_groups.set_defaults(func=runner.list_users_with_groups)
# Parser cmd create-user
parser_create_user = subparsers.add_parser('create-user',
parents=[parser_db_edit],
help="Create user",
epilog="""You might want to try command 'list-users'
to see which fields are available."""
)
parser_create_user.set_defaults(func=runner.create_user)
# Parser cmd update-user
parser_update_user = subparsers.add_parser('update-user',
parents=[parser_db_edit],
help="Update user with given ID",
epilog="""You might want to try command 'list-users'
to see which fields are available."""
)
parser_update_user.set_defaults(func=runner.update_user)
# Parser cmd delete-user
parser_delete_user = subparsers.add_parser('delete-user',
parents=[parser_db_delete],
help="Delete user with given ID",
)
parser_delete_user.set_defaults(func=runner.delete_user)
# Parser cmd list-groups
parser_list_groups = subparsers.add_parser('list-groups',
parents=[parser_db_lister],
help="List groups")
parser_list_groups.set_defaults(func=runner.list_groups)
parser_list_groups_with_members = subparsers.add_parser('list-groups-with-members',
parents=[parser_db_lister],
help="List groups with their members.")
parser_list_groups_with_members.set_defaults(func=runner.list_groups_with_members)
# Parser cmd create-group
parser_create_group = subparsers.add_parser('create-group',
parents=[parser_db_edit],
help="Create group")
parser_create_group.set_defaults(func=runner.create_group)
# Parser cmd update-group
parser_update_group = subparsers.add_parser('update-group',
parents=[parser_db_edit],
help="Update group with given ID")
parser_update_group.set_defaults(func=runner.update_group)
# Parser cmd delete-group
parser_delete_group = subparsers.add_parser('delete-group',
parents=[parser_db_delete],
help="Delete group with given ID")
parser_delete_group.set_defaults(func=runner.delete_group)
# Parser cmd list-group-members
parser_list_group_members = subparsers.add_parser('list-group-members',
parents=[parser_db_lister],
help="List group-members")
parser_list_group_members.set_defaults(func=runner.list_group_members)
# Parser cmd create-group-member
parser_create_group_member = subparsers.add_parser('create-group-member',
parents=[parser_db_edit],
help="Create group-member")
parser_create_group_member.set_defaults(func=runner.create_group_member)
# Parser cmd delete-group-member
parser_delete_group_member = subparsers.add_parser('delete-group-member',
parents=[parser_db_delete],
help="Delete group-member with given ID")
parser_delete_group_member.set_defaults(func=runner.delete_group_member)
return parser.parse_args()
def main(argv=None):
if not argv:
argv = sys.argv
start_time = time.time()
app_name = os.path.basename(argv[0])
lgg = logging.getLogger('cli.' + app_name)
runner = PymCli()
args = parse_args(PymCli, runner)
runner.init_app(args, lgg=lgg, setup_logging=True)
transaction.begin()
# noinspection PyBroadException
try:
args.func()
except Exception as exc:
transaction.abort()
lgg.exception(exc)
lgg.fatal('Changes rolled back.')
lgg.fatal('Program aborted!')
else:
if args.dry_run:
transaction.abort()
lgg.info('Dry-run. Changes rolled back.')
else:
transaction.commit()
lgg.info('Changes committed.')
finally:
lgg.info('{} secs.'.format(time.time() - start_time))
# Do some cleanup or saving etc.
authors: ["dmakowski@gmx.net"] | author_id: dmakowski@gmx.net

blob_id: 4cca625f6ace6606c02435c3a6d04ccab7d31b8e | directory_id: f29bae8e7440e99491f19cf2f28f92b816389628 | content_id: a4dd70ed94f69a0d66cd9ef43502b1fab2d99640
path: /users/admin.py | repo_name: AmulMittal/GrocerizeR | branch_name: refs/heads/master
snapshot_id: 70b731b13088e228220df43c9016886dccf02c48 | revision_id: dcd6c0cd7cfef6f07cd368cee2d6b6ca111a2214
visit_date: 2023-05-31T23:32:33.480593 | revision_date: 2021-06-09T09:11:45 | committer_date: 2021-06-09T09:11:45
github_id: 374,734,121 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 908 | extension: py
content:
# users/admin.py
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from django.contrib import admin

from users.forms import UserChangeForm, UserCreationForm
from users.models import User


class UserAdmin(BaseUserAdmin):
    form = UserChangeForm
    add_form = UserCreationForm

    list_display = ('email', 'is_admin')
    list_filter = ('is_admin',)
    fieldsets = (
        (None, {'fields': ('email', 'password', 'first_name', 'last_name',)}),
        ('Permissions', {'fields': ('is_admin',)}),
    )
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2'),
        }),
    )
    search_fields = ('email',)
    ordering = ('email',)
    filter_horizontal = ()

    class Meta:
        model = User


admin.site.register(User, UserAdmin)
admin.site.unregister(Group)
authors: ["amul.smart@gmail.com"] | author_id: amul.smart@gmail.com

blob_id: 552e505788d948bf31399ff002e7d8a56d6784b2 | directory_id: df5fd00119006a2ffb6d18a8464d8c48e20f4c8c | content_id: ebe49ed75cc7f1a5096aecfd8a76fe981e2d86d4
path: /uzdevumi/uzd_05_03.py | repo_name: Spector255/izdruka | branch_name: refs/heads/master
snapshot_id: 0dccbf6d807336f7fe72f9b1f60a6b8ccdd89f2d | revision_id: 18371da7851c94ff93930f308841aba609cc3e4a
visit_date: 2023-04-26T10:15:59.838338 | revision_date: 2021-05-27T08:56:43 | committer_date: 2021-05-27T08:56:43
github_id: 328,662,665 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,338 | extension: py
content:
# The customer at the till says whether a loyalty card is available.
# The customer buys N products. A loyalty card gives an 11% discount on every product.
# Buying at least 3 units or 3 kg gives a 5% discount; together with a loyalty card the discount is 15%.
# The customer enters the product name, the price (before discount) and the quantity.
# The program prints, in a full sentence, the product name, quantity, price and the price for that
# quantity; if a loyalty card is present, the discounted price is shown.
# At the end it reports the total for the whole purchase and the overall saving.
karte = input(
    'Labdien! Ja Jums ir klienta karte, tad ievadiet "+", ja nav, tad ievadiet "-" : '
)
produktusk = int(input('Lūdzu ievadiet produktu skaitu grozā: '))
summapp = 0.0
for i in range(produktusk):
    produkts_nos = input('Ievadiet preces nosaukumu: ')
    produkts_cen = float(input('Ievadiet preces cenu: '))
    sver = input(
        'Ievadiet "Jā", ja tas produksts ir svērams, vai ievadiet "Nē", ja tas nav svērama produkcija: '
    )
    while sver != "Jā" and sver != "Nē":
        print("Nekorēkti dati")
        sver = input(
            'Ievadiet "Jā", ja tas produksts ir svērams, vai ievadiet "Nē", ja tas nav svērama produkcija: '
        )
    if sver == "Jā":
        produkts_dau = float(input('Ievadiet preces daudzumu: '))
    else:
        produkts_dau = int(input('Ievadiet preces daudzumu: '))
    if produkts_dau >= 3.0 and karte == "+":
        # card + quantity discount: 15% off
        produkts_kopsumma = ((produkts_dau * produkts_cen) / 100) * 85
        produkts_kopsumma = round(produkts_kopsumma, 2)
    elif produkts_dau >= 3.0 and karte == "-":
        # quantity discount only: 5% off
        produkts_kopsumma = ((produkts_dau * produkts_cen) / 100) * 95
        produkts_kopsumma = round(produkts_kopsumma, 2)
    elif produkts_dau <= 3.0 and karte == "+":
        # card only: 11% off
        produkts_kopsumma = ((produkts_dau * produkts_cen) / 100) * 89
        produkts_kopsumma = round(produkts_kopsumma, 2)
    else:
        produkts_kopsumma = produkts_dau * produkts_cen
        produkts_kopsumma = round(produkts_kopsumma, 2)
    summapp = summapp + produkts_kopsumma
    print(
        f"Jūs izvēlejaties {produkts_nos}, {produkts_dau},par {produkts_cen}€, kopā par šo produkstu Jūms jāsamaksā {produkts_kopsumma}€"
    )
summapp = round(summapp, 2)
print()
print(f"Kopā Jums par pirkumu jāsamksa {summapp} €")
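As a worked check of the branches above (prices are illustrative, not from the source): 3 kg at 2.00 € with a card takes the 15% branch, 3 × 2.00 × 0.85 = 5.10 €; the same basket without a card takes the 5% branch, 3 × 2.00 × 0.95 = 5.70 €; and a single unit with a card takes the 11% branch, 1 × 2.00 × 0.89 = 1.78 €.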
authors: ["daniks255@inbox.lv"] | author_id: daniks255@inbox.lv

blob_id: c4d4aada8fe620856ab682e25557de2e5034c076 | directory_id: d042706db30773bd677c6c8d1eb90018391b1ebf | content_id: 1e4271272a59f284183ecdeaa5d2a23b3ab486ad
path: /food/migrations/0011_remove_userprofile_userid.py | repo_name: prolaysaha/Canteen-Management-System-Django | branch_name: refs/heads/main
snapshot_id: dee94b7858491351edae6f4b5325f6c3dbcec984 | revision_id: c4bdca299ffb062e76604d391a0bf088f332a6b8
visit_date: 2023-08-20T22:33:12.531929 | revision_date: 2021-10-17T17:40:33 | committer_date: 2021-10-17T17:40:33
github_id: 418,207,608 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 323 | extension: py
content:
# Generated by Django 3.0.5 on 2020-04-11 19:57

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('food', '0010_userprofile'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='userid',
        ),
    ]
authors: ["prolaysaha68@gmail.com"] | author_id: prolaysaha68@gmail.com

blob_id: 22818600f9f264fa75297d122dea89347b0822ea | directory_id: d3e0794ab3654393c11d227357701762142b626e | content_id: 2424c5093ffabee7b0ddf41202dd2a7f19bd6f54
path: /user001/scripts/dash/screens/scheduler/objects.py | repo_name: aleksProsk/projectHydroOpt | branch_name: refs/heads/master
snapshot_id: 09604096847cc0726eda6a912f7a1f30a4f5bac6 | revision_id: aa6f6b7c98567c66f2d0ed60bc5621e5b9bbbb90
visit_date: 2020-03-26T11:12:16.966021 | revision_date: 2018-09-11T15:37:30 | committer_date: 2018-09-11T15:37:30
github_id: 144,833,033 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 364 | extension: py
content:
objects = [
    {
        'object': 'reservoirInputTypeDropdown',
        'type': 'CDropdown',
    },
    {
        'object': 'performanceSettingsDropdown',
        'type': 'CDropdown',
    },
    {
        # duplicate of the first entry, kept as in the source
        'object': 'reservoirInputTypeDropdown',
        'type': 'CDropdown',
    },
    {
        'object': 'datePicker',
        'type': 'CDatePickerRange',
    }
]
authors: ["alexandriksasha@mail.ru"] | author_id: alexandriksasha@mail.ru

blob_id: a4a55a6035f40c5b04aa62febab0769feb9879e7 | directory_id: 5483f140c3ae34304cd85fe61b70259b688ff455 | content_id: a3173b9d4b7fe238042e50bd273b6cd74dd9602b
path: /propiedades/migrations/0001_initial.py | repo_name: inframazing/truehome-test-api | branch_name: refs/heads/master
snapshot_id: d2b72e4d22213430f6d9979c49f9d51e7ff8ed6a | revision_id: 29e5000f8cee76fe1ee9b96dae0ac18828a91fc3
visit_date: 2020-12-21T17:57:31.546592 | revision_date: 2020-01-27T14:48:14 | committer_date: 2020-01-27T14:48:14
github_id: 236,512,336 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 683 | extension: py
content:
# Generated by Django 3.0.2 on 2020-01-25 23:05

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Propiedad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('address', models.CharField(max_length=100)),
                ('square_meters', models.PositiveSmallIntegerField()),
                ('email', models.EmailField(max_length=50)),
            ],
        ),
    ]
authors: ["inframazing@gmail.com"] | author_id: inframazing@gmail.com

blob_id: 393fb345917b7098f1abe268a275728ddb549849 | directory_id: 92efbf5b96cb59c29bc0e20da5f9cf2ff67d5e13 | content_id: 7c5d28aabd3c70de9870cd293471bed102718581
path: /chapter4/4.6/HelloProj/com/pkg1/hello.py | repo_name: chawlau/python_expert | branch_name: refs/heads/master
snapshot_id: 474cfcecf8093543cb593078a6b9c2b32de7fd98 | revision_id: 3261a33661576a0da41119c967e074cd6e3abd69
visit_date: 2020-12-06T03:50:34.805500 | revision_date: 2020-01-07T13:39:59 | committer_date: 2020-01-07T13:39:59
github_id: 232,332,225 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 284 | extension: py
content:
# coding=utf-8
# Code file: chapter4/4.5/com/pkg1/hello.py
import com.pkg2.hello as module1
from com.pkg2.hello import z

y = 20
print(y)          # the current module's variable y
print(module1.y)  # variable y from module com.pkg2.hello
print(z)          # variable z from module com.pkg2.hello
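For these cross-package imports to resolve, the project needs a layout along the following lines (a sketch: only `com/pkg1/hello.py` appears in the record, the `pkg2` side is implied by the imports):

```
HelloProj/
    com/
        __init__.py
        pkg1/
            __init__.py
            hello.py    # this file
        pkg2/
            __init__.py
            hello.py    # must define y and z
```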
authors: ["xiaonongyue@gmail.com"] | author_id: xiaonongyue@gmail.com

blob_id: 2f2962ea25e9f936fbeeefc8d47ccaa3fd482996 | directory_id: 007cd60ca84623c5ace8b50e846be659ae2e8c85 | content_id: 7684e370c580ba310af74e8cd2de5a1b7fa0b18d
path: /collectors/icdpcs/record.py | repo_name: kenferrara/collectors | branch_name: refs/heads/master
snapshot_id: 412a97f0475747a206cbe68890774c0c37e9fc1a | revision_id: e6c1f45df3a1ffd5d60dada1816484812eb51417
visit_date: 2023-04-16T22:01:19.899692 | revision_date: 2020-04-23T02:31:24 | committer_date: 2020-04-23T02:31:24
github_id: 258,073,992 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2023-04-04T00:19:23 | gha_created_at: 2020-04-23T02:30:59 | gha_language: Python
detected_licenses: ["MIT"] | license_type: permissive | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 516 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from .. import base
from ..base.fields import Text, Date, Boolean


# Module API

class Record(base.Record):

    # Config
    table = 'icdpcs'

    # General
    code = Text(primary_key=True)
    is_header = Boolean('0')
    short_description = Text()
    long_description = Text()
    version = Text()
    last_updated = Date('%Y-%m-%d')
authors: ["roll@post.agency"] | author_id: roll@post.agency

blob_id: 89bdc85d37ffd1c71be7f5b5ef909ca3a1c4a9bf | directory_id: 7797913bc78cfa0cfdb30813d4268e02770722e0 | content_id: a7f7c3ecae5ac98dcb54dcf7271b5078b9d8f0eb
path: /Section 1/Q34.py | repo_name: pkoi5088/CodeFestival | branch_name: refs/heads/main
snapshot_id: ecdf5dbdbc78793fd093a614f60d2639a68bffb3 | revision_id: cdc3065f6a2557936432f09344d5bfa09ff8b651
visit_date: 2023-03-19T14:49:02.488625 | revision_date: 2021-03-14T03:20:15 | committer_date: 2021-03-14T03:20:15
github_id: 345,632,059 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 71 | extension: py
content:
n=list(map(int,input().split()))
print('YES' if n==sorted(n) else 'NO')
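The one-liner reads space-separated integers from stdin and prints `YES` when they are already in non-decreasing order, so the input `1 2 2 5` yields `YES` and `3 1 2` yields `NO` (illustrative inputs, not from the source).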
authors: ["pkoi5088@gmail.com"] | author_id: pkoi5088@gmail.com

blob_id: 3ce08fbb326978a3db114eee111616ede922b3a7 | directory_id: d86b94b9fa849c4421f60bfbc455bb3679e3dbf9 | content_id: c5869cada9a4eeac869edee4acea0556a434d5f2
path: /CP/codificacao.py | repo_name: marciorgb/computacao-evolutiva | branch_name: refs/heads/master
snapshot_id: 048749f82ccb052620a0f053b847520e33128a60 | revision_id: af72ea26db598b7bde431309cd6949ccc422c9ec
visit_date: 2020-06-04T08:56:15.511638 | revision_date: 2019-06-14T14:31:03 | committer_date: 2019-06-14T14:31:03
github_id: 191,953,755 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,105 | extension: py
content:
from statistics import mean
from fitness import *
from Seleção import *
from cruzamento import *
from substituição import *
from graficos import *
import matplotlib.pyplot as plt
import random as r
import copy
import time

# NOTE: the original also did `from codificacao import start_population`,
# i.e. this module (/CP/codificacao.py) imported itself; the import is
# dropped here since start_population is defined below.


def start_conjunct(conjunct_size):
    """Initialise one solution (a random bit string)."""
    conjunct = []
    for i in range(conjunct_size):
        conjunct.append(r.randint(0, 1))
    return conjunct


def conjunct_is_invalid(population, last):
    """Check whether the newest set duplicates an earlier one."""
    for i, val in enumerate(population):
        if population[i] == population[last] and i != last:
            return True
    return False


def start_population(conjunct_size, solution_size):
    """Initialise a population of solutions."""
    population = []
    for i in range(solution_size):
        population.append(start_conjunct(conjunct_size))
        while conjunct_is_invalid(population, i):  # regenerate if the set already exists
            population.pop()
            population.append(start_conjunct(conjunct_size))
    return population
authors: ["marcio@aptans.com"] | author_id: marcio@aptans.com

blob_id: 8782fd2774cf2e8fc8888edfa047ffb1f2444c9d | directory_id: 9f99f103281817b7d8bbe0df0c0c68dd2c6b9b06 | content_id: 31cf02a5197dd6180e75164375dc637a2f92cab3
path: /tests/test_blog.py | repo_name: heyitsM/Flaskr-with-behave | branch_name: refs/heads/master
snapshot_id: a2a6ad59028990200ce65f09df75a7544542331a | revision_id: 4356755a392b152cac3abfcddeaf6b9b346b1b1a
visit_date: 2023-06-26T20:42:15.062659 | revision_date: 2021-07-20T19:43:21 | committer_date: 2021-07-20T19:43:21
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,611 | extension: py
content:
import pytest
from db import get_db


def test_index(client, auth):
    response = client.get('/')
    assert b"Log In" in response.data
    assert b"Register" in response.data

    auth.login()
    response = client.get('/')
    assert b'Log Out' in response.data
    assert b'test title' in response.data
    assert b'by test on 2018-01-01' in response.data
    assert b'test\nbody' in response.data
    assert b'href="/1/update"' in response.data


@pytest.mark.parametrize('path', (
    '/create',
    '/1/update',
    '/1/delete',
))
def test_login_required(client, path):
    response = client.post(path)
    assert response.headers['Location'] == 'http://localhost/auth/login'


def test_author_required(app, client, auth):
    # change the post author to another user
    with app.app_context():
        db = get_db()
        db.execute('UPDATE post SET author_id = 2 WHERE id = 1')
        db.commit()

    auth.login()
    # current user can't modify other user's post
    assert client.post('/1/update').status_code == 403
    assert client.post('/1/delete').status_code == 403
    # current user doesn't see edit link
    assert b'href="/1/update"' not in client.get('/').data


@pytest.mark.parametrize('path', (
    '/2/update',
    '/2/delete',
))
def test_exists_required(client, auth, path):
    auth.login()
    assert client.post(path).status_code == 404


def test_create(client, auth, app):
    auth.login()
    assert client.get('/create').status_code == 200
    client.post('/create', data={'title': 'created', 'body': ''})

    with app.app_context():
        db = get_db()
        count = db.execute('SELECT COUNT(id) FROM post').fetchone()[0]
        assert count == 2


def test_update(client, auth, app):
    auth.login()
    assert client.get('/1/update').status_code == 200
    client.post('/1/update', data={'title': 'updated', 'body': ''})

    with app.app_context():
        db = get_db()
        post = db.execute('SELECT * FROM post WHERE id = 1').fetchone()
        assert post['title'] == 'updated'


@pytest.mark.parametrize('path', (
    '/create',
    '/1/update',
))
def test_create_update_validate(client, auth, path):
    auth.login()
    response = client.post(path, data={'title': '', 'body': ''})
    assert b'Title is required.' in response.data


def test_delete(client, auth, app):
    auth.login()
    response = client.post('/1/delete')
    assert response.headers['Location'] == 'http://localhost/'

    with app.app_context():
        db = get_db()
        post = db.execute('SELECT * FROM post WHERE id = 1').fetchone()
        assert post is None
authors: ["edeitrick@codio.com"] | author_id: edeitrick@codio.com

blob_id: df73d08c2176b58e1ab3447f7aebf4509f80e0dd | directory_id: 44fafa3e65a5af76297b889a3c7dad87270b1c81 | content_id: 5943314e411a83836b060de97aab8d2f5d140ce9
path: /sentinelle/Order.py | repo_name: luciannno/renaissance | branch_name: refs/heads/master
snapshot_id: 501c045280393c5d61b549ee1f7f28eeb78709df | revision_id: 94c0f6b90f70451419249964885a14486bf30e82
visit_date: 2021-06-30T11:35:14.545272 | revision_date: 2017-01-19T11:10:16 | committer_date: 2017-01-19T11:10:16
github_id: 56,840,455 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,004 | extension: py
content:
import abc


class Order:
    """
    """
    # Python 2 style; under Python 3 this would be
    # `class Order(metaclass=abc.ABCMeta)`.
    __metaclass__ = abc.ABCMeta

    instrument = None
    side = None
    orderType = None
    stopPrice = None
    trailStopPrice = None
    suggested = False
    quantity = 0

    def __init__(self, instrument, side, orderType, stopPrice=None, trailStopPrice=None):
        self.instrument = instrument
        self.side = side
        self.orderType = orderType
        self.stopPrice = stopPrice
        self.trailStopPrice = trailStopPrice

    def isSuggested(self):
        return self.suggested

    def __repr__(self):
        return "This is an order for %s" % (self.instrument)


class SuggestedOrder(Order):
    """
    """

    def __init__(self, instrument, side, orderType, stopPrice=None, trailStopPrice=None):
        super(SuggestedOrder, self).__init__(instrument, side, orderType, stopPrice, trailStopPrice)
        # Only RiskManager should be able to remove the suggested flag
        self.suggested = True
authors: ["lbrunette@servidor.(none)"] | author_id: lbrunette@servidor.(none)

blob_id: 56c26d37646620c5a72bd601ee1e5fa9ab81a06d | directory_id: e25d02365d14e91ffef84e9f8695c198c4ad6184 | content_id: 241aafd3aa37d81658c479a022d7f4c8bae7eac6
path: /setup.py | repo_name: MahmoudMTaha/ITI_PRO_Project | branch_name: refs/heads/main
snapshot_id: f9f8bbe12222376f7002569946f5906b6d0ec878 | revision_id: d01796f93b361f87c6966c74dd69ef212ccbfddb
visit_date: 2023-08-08T08:25:00.127169 | revision_date: 2021-09-12T17:41:11 | committer_date: 2021-09-12T17:41:11
github_id: 405,710,902 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 223 | extension: py
content:
from setuptools import find_packages, setup

setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='project implemented in dr denna session',
    author='Mahmoud Taha',
    license='',
)
authors: ["43166822+MahmoudMTaha@users.noreply.github.com"] | author_id: 43166822+MahmoudMTaha@users.noreply.github.com

blob_id: 2d6cc4da009245aa30319aa0653b7d5b9afef22e | directory_id: f8090ce7564d0b6396adb0f940e8a83b2135d11f | content_id: aa3c0be780fc3cb3b9c220e5a468607e0f3da3ee
path: /ipdpw/chrI/sort/dataframe.py | repo_name: zengchan/ipd-pw-code | branch_name: refs/heads/master
snapshot_id: 013986b8eb4a29d2ee67b117ef4863a9e8b1f564 | revision_id: 143710ec6b66c1cf64ab72ac1a46bbe1cb09b756
visit_date: 2020-03-28T03:16:57.625279 | revision_date: 2018-09-06T07:45:04 | committer_date: 2018-09-06T07:45:04
github_id: 147,627,878 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 972 | extension: py
content:
#coding:utf8
import sys
from pandas import DataFrame  # DataFrame holds two-dimensional tables
import pandas as pd           # pandas is a popular data-analysis package
import numpy as np

# Build a dict: keys and values are both read from the file
# (keys like nam, age, ...; values like lili, jim, ...).
data = pd.read_table('chrI_ipd_pw_HeChuan36c_ccssubsort.txt', sep='\t',
                     names=["pos", "read", "SlnIndex", "ipd", "pw"], usecols=[0, 1, 3, 5, 6])
df = DataFrame(data, columns=["pos", "read", "SlnIndex", "ipd", "pw"])
# print(df)
# Write the DataFrame out as a table, without row or column indices.
# df.to_csv('extract.xlsx', index=False, header=["pos", "read", "SlnIndex", "ipd", "pw"], sep='\t')
# df.to_csv('extract.txt', index=False, header=["pos", "read", "SlnIndex", "ipd", "pw"], sep='\t')
df1 = df.sort_values(by=['pos', 'read'])
# print(df1)
# NOTE: both outputs are tab-separated text; '.xlsx' here is only a file name.
df1.to_csv('sort.xlsx', index=False, header=["pos", "read", "SlnIndex", "ipd", "pw"], sep='\t')
df1.to_csv('sort.txt', index=False, header=["pos", "read", "SlnIndex", "ipd", "pw"], sep='\t')
authors: ["chanzengupup@163.com"] | author_id: chanzengupup@163.com

blob_id: 63e199ac8e6c06d09b648c691e651c8c76258e47 | directory_id: 06c6056d1f7eea23a269b117f30bf8bdc0bd88a3 | content_id: 01be440c88e62ff433c2a24a2e19d64d132f4763
path: /train_lr.py | repo_name: gjy3035/PCC-Net | branch_name: refs/heads/ori_pt1_py3
snapshot_id: 3e851e32717c52b6dbc2ceb86c650f3784d90b6d | revision_id: 5beecd25cc551e2ac3248622abc950d09b526953
visit_date: 2020-04-12T13:08:00.241032 | revision_date: 2019-11-29T05:29:16 | committer_date: 2019-11-29T05:29:16
github_id: 162,512,887 | star_events_count: 77 | fork_events_count: 18
gha_license_id: null | gha_event_created_at: 2020-03-12T11:12:23 | gha_created_at: 2018-12-20T02:00:13 | gha_language: Python
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,454 | extension: py
content:
from tensorboardX import SummaryWriter
import os
import random
import torch
from torch import optim
from torch.autograd import Variable
from torch.nn import NLLLoss2d
from torch.optim.lr_scheduler import StepLR
import torchvision.transforms as standard_transforms
import torchvision.utils as vutils
from models.CC import CrowdCounter
from config import cfg
from loading_data import loading_data
from misc.utils import *
from misc.timer import Timer
import pdb
exp_name = cfg.TRAIN.EXP_NAME
writer = SummaryWriter(cfg.TRAIN.EXP_PATH+ '/' + exp_name)
log_txt = cfg.TRAIN.EXP_PATH + '/' + exp_name + '/' + exp_name + '.txt'
if not os.path.exists(cfg.TRAIN.EXP_PATH):
os.mkdir(cfg.TRAIN.EXP_PATH)
pil_to_tensor = standard_transforms.ToTensor()
train_record = {'best_mae': 1e20, 'mse':1e20,'corr_loss': 0, 'corr_epoch': -1, 'best_model_name': ''}
train_set, train_loader, val_set, val_loader, restore_transform = loading_data()
_t = {'iter time' : Timer(),'train time' : Timer(),'val time' : Timer()}
rand_seed = cfg.TRAIN.SEED
if rand_seed is not None:
np.random.seed(rand_seed)
torch.manual_seed(rand_seed)
torch.cuda.manual_seed(rand_seed)
def main():
cfg_file = open('./config.py',"r")
cfg_lines = cfg_file.readlines()
with open(log_txt, 'a') as f:
f.write(''.join(cfg_lines) + '\n\n\n\n')
torch.cuda.set_device(cfg.TRAIN.GPU_ID[0])
torch.backends.cudnn.benchmark = True
net = CrowdCounter(ce_weights=train_set.wts)
net.train()
optimizer = optim.Adam([
{'params': [param for name, param in net.named_parameters() if 'seg' in name], 'lr': cfg.TRAIN.SEG_LR},
{'params': [param for name, param in net.named_parameters() if 'seg' not in name], 'lr': cfg.TRAIN.LR}
])
i_tb = 0
for epoch in range(cfg.TRAIN.MAX_EPOCH):
_t['train time'].tic()
i_tb,model_path = train(train_loader, net, optimizer, epoch, i_tb)
_t['train time'].toc(average=False)
print( 'train time of one epoch: {:.2f}s'.format(_t['train time'].diff) )
if epoch%cfg.VAL.FREQ!=0:
continue
_t['val time'].tic()
validate(val_loader, model_path, epoch, restore_transform)
_t['val time'].toc(average=False)
print( 'val time of one epoch: {:.2f}s'.format(_t['val time'].diff))
def train(train_loader, net, optimizer, epoch, i_tb):
for i, data in enumerate(train_loader, 0):
_t['iter time'].tic()
img, gt_map, gt_cnt, roi, gt_roi, gt_seg = data
for i_img in range(cfg.TRAIN.BATCH_SIZE):
roi[i_img,:,0] = i_img
roi = roi.view(cfg.TRAIN.BATCH_SIZE*cfg.TRAIN.NUM_BOX,5)
gt_roi = gt_roi.view(cfg.TRAIN.BATCH_SIZE*cfg.TRAIN.NUM_BOX,10)
img = Variable(img).cuda()
gt_map = Variable(gt_map).cuda()
roi = Variable(roi).cuda().float()
gt_roi = Variable(gt_roi).cuda()
gt_seg = Variable(gt_seg).cuda()
optimizer.zero_grad()
pred_map,pred_cls, pred_seg = net(img, gt_map, roi, gt_roi, gt_seg)
loss = net.loss
# pdb.set_trace()
loss.backward()
optimizer.step()
if (i + 1) % cfg.TRAIN.PRINT_FREQ == 0:
loss1,loss2,loss3 = net.f_loss()
i_tb = i_tb + 1
writer.add_scalar('train_loss_mse', loss1.item(), i_tb)
writer.add_scalar('train_loss_cls', loss2.item(), i_tb)
writer.add_scalar('train_loss_seg', loss3.item(), i_tb)
writer.add_scalar('train_loss', loss.item(), i_tb)
_t['iter time'].toc(average=False)
print( '[ep %d][it %d][loss %.8f %.8f %.4f %.4f][%.2fs]' % \
(epoch + 1, i + 1, loss.item(), loss1.item(), loss2.item(), loss3.item(), _t['iter time'].diff) )
# pdb.set_trace()
print( ' [cnt: gt: %.1f pred: %.6f]' % (gt_cnt[0]/cfg.DATA.DEN_ENLARGE, pred_map[0,:,:,:].sum().item()/cfg.DATA.DEN_ENLARGE) )
snapshot_name = 'all_ep_%d' % (epoch + 1)
# save model
to_saved_weight = []
if len(cfg.TRAIN.GPU_ID)>1:
to_saved_weight = net.module.state_dict()
else:
to_saved_weight = net.state_dict()
model_path = os.path.join(cfg.TRAIN.EXP_PATH, exp_name, snapshot_name + '.pth')
torch.save(to_saved_weight, model_path)
return i_tb,model_path
def validate(val_loader, model_path, epoch, restore):
net = CrowdCounter(ce_weights=train_set.wts)
net.load_state_dict(torch.load(model_path))
net.cuda()
net.eval()
print( '='*50 )
val_loss_mse = []
val_loss_cls = []
val_loss_seg = []
val_loss = []
mae = 0.0
mse = 0.0
for vi, data in enumerate(val_loader, 0):
img, gt_map, gt_cnt, roi, gt_roi, gt_seg = data
# pdb.set_trace()
with torch.no_grad():
img = Variable(img).cuda()
gt_map = Variable(gt_map).cuda()
gt_seg = Variable(gt_seg).cuda()
roi = Variable(roi[0]).cuda().float()
gt_roi = Variable(gt_roi[0]).cuda()
pred_map,pred_cls,pred_seg = net(img, gt_map, roi, gt_roi, gt_seg)
loss1,loss2,loss3 = net.f_loss()
val_loss_mse.append(loss1.item())
val_loss_cls.append(loss2.item())
val_loss_seg.append(loss3.item())
val_loss.append(net.loss.item())
pred_map = pred_map.data.cpu().numpy()/cfg.DATA.DEN_ENLARGE
gt_map = gt_map.data.cpu().numpy()/cfg.DATA.DEN_ENLARGE
pred_seg = pred_seg.cpu().max(1)[1].squeeze_(1).data.numpy()
gt_seg = gt_seg.data.cpu().numpy()
gt_count = np.sum(gt_map)
pred_cnt = np.sum(pred_map)
mae += abs(gt_count-pred_cnt)
mse += ((gt_count-pred_cnt)*(gt_count-pred_cnt))
x = []
if vi==0:
for idx, tensor in enumerate(zip(img.cpu().data, pred_map, gt_map, pred_seg, gt_seg)):
if idx>cfg.VIS.VISIBLE_NUM_IMGS:
break
# pdb.set_trace()
pil_input = restore(tensor[0]/255.)
pil_label = torch.from_numpy(tensor[2]/(tensor[2].max()+1e-10)).repeat(3,1,1)
pil_output = torch.from_numpy(tensor[1]/(tensor[1].max()+1e-10)).repeat(3,1,1)
pil_gt_seg = torch.from_numpy(tensor[4]).repeat(3,1,1).float()
pil_pred_seg = torch.from_numpy(tensor[3]).repeat(3,1,1).float()
# pdb.set_trace()
x.extend([pil_to_tensor(pil_input.convert('RGB')), pil_label, pil_output, pil_gt_seg, pil_pred_seg])
x = torch.stack(x, 0)
x = vutils.make_grid(x, nrow=5, padding=5)
writer.add_image(exp_name + '_epoch_' + str(epoch+1), (x.numpy()*255).astype(np.uint8))
mae = mae/val_set.get_num_samples()
mse = np.sqrt(mse/val_set.get_num_samples())
'''
loss1 = float(np.mean(np.array(val_loss_mse)))
loss2 = float(np.mean(np.array(val_loss_cls)))
loss3 = float(np.mean(np.array(val_loss_seg)))
loss = float(np.mean(np.array(val_loss)))'''
loss1 = np.mean(val_loss_mse)
loss2 = np.mean(val_loss_cls)
loss3 = np.mean(val_loss_seg)
loss = np.mean(val_loss)
writer.add_scalar('val_loss_mse', loss1, epoch + 1)
writer.add_scalar('val_loss_cls', loss2, epoch + 1)
writer.add_scalar('val_loss_seg', loss3, epoch + 1)
writer.add_scalar('val_loss', loss, epoch + 1)
writer.add_scalar('mae', mae, epoch + 1)
writer.add_scalar('mse', mse, epoch + 1)
if mae < train_record['best_mae']:
train_record['best_mae'] = mae
train_record['mse'] = mse
train_record['corr_epoch'] = epoch + 1
train_record['corr_loss'] = loss
print( '='*50 )
print( exp_name )
print( ' '+ '-'*20 )
print( ' [mae %.1f mse %.1f], [val loss %.8f %.8f %.4f %.4f]' % (mae, mse, loss, loss1, loss2, loss3) )
print( ' '+ '-'*20 )
# pdb.set_trace()
print( '[best] [mae %.1f mse %.1f], [loss %.8f], [epoch %d]' % (train_record['best_mae'], train_record['mse'], train_record['corr_loss'], train_record['corr_epoch']) )
print( '='*50 )
if __name__ == '__main__':
main()
authors: ["gjy3035@gmail.com"] | author_id: gjy3035@gmail.com

blob_id: 52a387e2fda6b62173da5b67d8660f46997da921 | directory_id: c0b6da793db606476bd601b5c1e5671a0fdeaa1c | content_id: d005898fdd3c303c5832535be87daba4dac81d7f
path: /scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/units/quantity_helper.py | repo_name: waynebhayes/SpArcFiRe | branch_name: refs/heads/master
snapshot_id: 201ba0a3724a4349568dfae6c7f7d7952caa2565 | revision_id: 6d95873226badedbb14f9c355365d692c7b3ea03
visit_date: 2023-06-08T17:27:21.319312 | revision_date: 2023-05-28T00:06:32 | committer_date: 2023-05-28T00:06:32
github_id: 120,016,157 | star_events_count: 8 | fork_events_count: 23
gha_license_id: null | gha_event_created_at: 2023-09-08T17:21:55 | gha_created_at: 2018-02-02T18:26:09 | gha_language: Python
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 25,204 | extension: py
content:
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
from fractions import Fraction
import numpy as np
from .core import (UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled, get_current_unit_registry)
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
try:
scale = from_unit._to(to_unit)
except UnitsError:
return from_unit._apply_equivalencies(
from_unit, to_unit, get_current_unit_registry().equivalencies)
except AttributeError:
raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'"
.format(from_unit, to_unit))
if scale == 1.:
return None
else:
return lambda val: scale * val
UFUNC_HELPERS = {}
# In this file, we implement the logic that determines for a given ufunc and
# input how the input should be scaled and what unit the output will have.
# list of ufuncs:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs
UNSUPPORTED_UFUNCS = set([np.bitwise_and, np.bitwise_or,
np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or,
np.logical_xor, np.logical_not])
if isinstance(getattr(np, 'isnat', None), np.ufunc):
UNSUPPORTED_UFUNCS |= {np.isnat}
# SINGLE ARGUMENT UFUNCS
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the function should be two values: the
# scale by which the input needs to be multiplied before being passed to the
# ufunc, and the unit the output will be in.
# ufuncs that return a boolean and do not care about the unit
helper_onearg_test = lambda f, unit: ([None], None)
UFUNC_HELPERS[np.isfinite] = helper_onearg_test
UFUNC_HELPERS[np.isinf] = helper_onearg_test
UFUNC_HELPERS[np.isnan] = helper_onearg_test
UFUNC_HELPERS[np.sign] = helper_onearg_test
UFUNC_HELPERS[np.signbit] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
helper_invariant = lambda f, unit: ([None], _d(unit))
UFUNC_HELPERS[np.absolute] = helper_invariant
UFUNC_HELPERS[np.fabs] = helper_invariant
UFUNC_HELPERS[np.conj] = helper_invariant
UFUNC_HELPERS[np.conjugate] = helper_invariant
UFUNC_HELPERS[np.negative] = helper_invariant
UFUNC_HELPERS[np.spacing] = helper_invariant
UFUNC_HELPERS[np.rint] = helper_invariant
UFUNC_HELPERS[np.floor] = helper_invariant
UFUNC_HELPERS[np.ceil] = helper_invariant
UFUNC_HELPERS[np.trunc] = helper_invariant
# positive only was added in numpy 1.13
if isinstance(getattr(np, 'positive', None), np.ufunc):
UFUNC_HELPERS[np.positive] = helper_invariant
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = lambda f, unit: (
[None], unit ** 0.5 if unit is not None else dimensionless_unscaled)
UFUNC_HELPERS[np.square] = lambda f, unit: (
[None], unit ** 2 if unit is not None else dimensionless_unscaled)
UFUNC_HELPERS[np.reciprocal] = lambda f, unit: (
[None], unit ** -1 if unit is not None else dimensionless_unscaled)
# cbrt only was added in numpy 1.10
if isinstance(getattr(np, 'cbrt', None), np.ufunc):
UFUNC_HELPERS[np.cbrt] = lambda f, unit: (
[None], (unit ** Fraction(1, 3) if unit is not None
else dimensionless_unscaled))
UFUNC_HELPERS[np.core.umath._ones_like] = (lambda f, unit:
([None], dimensionless_unscaled))
# ufuncs that require dimensionless input and and give dimensionless output
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
UFUNC_HELPERS[np.exp] = helper_dimensionless_to_dimensionless
UFUNC_HELPERS[np.expm1] = helper_dimensionless_to_dimensionless
UFUNC_HELPERS[np.exp2] = helper_dimensionless_to_dimensionless
UFUNC_HELPERS[np.log] = helper_dimensionless_to_dimensionless
UFUNC_HELPERS[np.log10] = helper_dimensionless_to_dimensionless
UFUNC_HELPERS[np.log2] = helper_dimensionless_to_dimensionless
UFUNC_HELPERS[np.log1p] = helper_dimensionless_to_dimensionless
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
UFUNC_HELPERS[np.modf] = helper_modf
# ufuncs that require dimensionless input and give output in radians
def helper_dimensionless_to_radian(f, unit):
from .si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
UFUNC_HELPERS[np.arccos] = helper_dimensionless_to_radian
UFUNC_HELPERS[np.arcsin] = helper_dimensionless_to_radian
UFUNC_HELPERS[np.arctan] = helper_dimensionless_to_radian
UFUNC_HELPERS[np.arccosh] = helper_dimensionless_to_radian
UFUNC_HELPERS[np.arcsinh] = helper_dimensionless_to_radian
UFUNC_HELPERS[np.arctanh] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
def helper_degree_to_radian(f, unit):
from .si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
UFUNC_HELPERS[np.radians] = helper_degree_to_radian
UFUNC_HELPERS[np.deg2rad] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
def helper_radian_to_degree(f, unit):
from .si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
UFUNC_HELPERS[np.degrees] = helper_radian_to_degree
UFUNC_HELPERS[np.rad2deg] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
def helper_radian_to_dimensionless(f, unit):
from .si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
UFUNC_HELPERS[np.cos] = helper_radian_to_dimensionless
UFUNC_HELPERS[np.sin] = helper_radian_to_dimensionless
UFUNC_HELPERS[np.tan] = helper_radian_to_dimensionless
UFUNC_HELPERS[np.cosh] = helper_radian_to_dimensionless
UFUNC_HELPERS[np.sinh] = helper_radian_to_dimensionless
UFUNC_HELPERS[np.tanh] = helper_radian_to_dimensionless
# ufuncs that require dimensionless_unscaled input and return non-quantities
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{0}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
UFUNC_HELPERS[np.multiply] = helper_multiplication
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
UFUNC_HELPERS[np.power] = helper_power
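# Illustrative note (a sketch): the False result unit tells the caller that it
# must raise the unit itself; e.g. for np.power on a quantity in m with an
# exponent of 2, converters_and_unit below computes a result unit of m ** 2.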
# float_power was added in numpy 1.12
if isinstance(getattr(np, 'float_power', None), np.ufunc):
UFUNC_HELPERS[np.float_power] = helper_power
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
UFUNC_HELPERS[np.ldexp] = helper_ldexp
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
UFUNC_HELPERS[np.copysign] = helper_copysign
# np.heaviside was only added in numpy 1.13
if isinstance(getattr(np, 'heaviside', None), np.ufunc):
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
UFUNC_HELPERS[np.heaviside] = helper_heaviside
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
UFUNC_HELPERS[np.logaddexp] = helper_two_arg_dimensionless
UFUNC_HELPERS[np.logaddexp2] = helper_two_arg_dimensionless
def get_converters_and_unit(f, *units):
converters = [None, None]
# no units for any input -- e.g., np.add(a1, a2, out=q)
if all(unit is None for unit in units):
return converters, dimensionless_unscaled
fixed, changeable = (1, 0) if units[1] is None else (0, 1)
if units[fixed] is None:
try:
converters[changeable] = get_converter(units[changeable],
dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[fixed] = False
return converters, units[changeable]
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(units[changeable],
units[fixed])
except UnitsError:
raise UnitConversionError(
"Can only apply '{0}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, units[fixed]
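# Illustrative example (a sketch):
#
#     >>> from astropy import units as u
#     >>> converters, unit = get_converters_and_unit(np.add, u.km, u.m)
#     >>> unit
#     Unit("km")
#     >>> converters[1](1000.)  # the converter rescales the m input into km
#     1.0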
def helper_twoarg_invariant(f, unit1, unit2):
return get_converters_and_unit(f, unit1, unit2)
UFUNC_HELPERS[np.add] = helper_twoarg_invariant
UFUNC_HELPERS[np.subtract] = helper_twoarg_invariant
UFUNC_HELPERS[np.hypot] = helper_twoarg_invariant
UFUNC_HELPERS[np.maximum] = helper_twoarg_invariant
UFUNC_HELPERS[np.minimum] = helper_twoarg_invariant
UFUNC_HELPERS[np.fmin] = helper_twoarg_invariant
UFUNC_HELPERS[np.fmax] = helper_twoarg_invariant
UFUNC_HELPERS[np.nextafter] = helper_twoarg_invariant
UFUNC_HELPERS[np.remainder] = helper_twoarg_invariant
UFUNC_HELPERS[np.mod] = helper_twoarg_invariant
UFUNC_HELPERS[np.fmod] = helper_twoarg_invariant
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
UFUNC_HELPERS[np.greater] = helper_twoarg_comparison
UFUNC_HELPERS[np.greater_equal] = helper_twoarg_comparison
UFUNC_HELPERS[np.less] = helper_twoarg_comparison
UFUNC_HELPERS[np.less_equal] = helper_twoarg_comparison
UFUNC_HELPERS[np.not_equal] = helper_twoarg_comparison
UFUNC_HELPERS[np.equal] = helper_twoarg_comparison
def helper_twoarg_invtrig(f, unit1, unit2):
from .si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
UFUNC_HELPERS[np.arctan2] = helper_twoarg_invtrig
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
UFUNC_HELPERS[np.core.umath._arg] = helper_twoarg_invtrig
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
# np.divmod was only added in numpy 1.13
if isinstance(getattr(np, 'divmod', None), np.ufunc):
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
UFUNC_HELPERS[np.divmod] = helper_divmod
def can_have_arbitrary_unit(value):
"""Test whether the items in value can have arbitrary units
Numbers whose value does not change upon a unit change, i.e.,
zero, infinity, or not-a-number
Parameters
----------
value : number or array
Returns
-------
`True` if each member is either zero or not finite, `False` otherwise
"""
return np.all(np.logical_or(np.equal(value, 0.), ~np.isfinite(value)))
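# For example (a sketch): can_have_arbitrary_unit(np.array([0., np.inf, np.nan]))
# is True, while can_have_arbitrary_unit(np.array([0., 1.])) is False, since a
# finite non-zero number changes under a unit conversion.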
def converters_and_unit(function, method, *args):
"""Determine the required converters and the unit of the ufunc result.
Converters are functions required to convert to a ufunc's expected unit,
e.g., radian for np.sin; or to ensure units of two inputs are consistent,
e.g., for np.add. In these examples, the unit of the result would be
dimensionless_unscaled for np.sin, and the same consistent unit for np.add.
Parameters
----------
function : `~numpy.ufunc`
Numpy universal function
method : str
Method with which the function is evaluated, e.g.,
'__call__', 'reduce', etc.
*args : Quantity or other ndarray subclass
Input arguments to the function
Raises
------
TypeError : when the specified function cannot be used with Quantities
(e.g., np.logical_or), or when the routine does not know how to handle
the specified function (in which case an issue should be raised on
https://github.com/astropy/astropy).
UnitTypeError : when the conversion to the required (or consistent) units
is not possible.
"""
# Check whether we even support this ufunc
if function in UNSUPPORTED_UFUNCS:
raise TypeError("Cannot use function '{0}' with quantities"
.format(function.__name__))
if method == '__call__' or (method == 'outer' and function.nin == 2):
# Find out the units of the arguments passed to the ufunc; usually,
# at least one is a quantity, but for two-argument ufuncs, the second
# could also be a Numpy array, etc. These are given unit=None.
units = [getattr(arg, 'unit', None) for arg in args]
# If the ufunc is supported, then we call a helper function (defined
# above) which returns a list of function(s) that converts the input(s)
# to the unit required for the ufunc, as well as the unit the output
# will have (this is a tuple of units if there are multiple outputs).
if function in UFUNC_HELPERS:
converters, result_unit = UFUNC_HELPERS[function](function, *units)
else:
raise TypeError("Unknown ufunc {0}. Please raise issue on "
"https://github.com/astropy/astropy"
.format(function.__name__))
if any(converter is False for converter in converters):
# for two-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
maybe_arbitrary_arg = args[converters.index(False)]
try:
if can_have_arbitrary_unit(maybe_arbitrary_arg):
converters = [None, None]
else:
raise UnitsError("Can only apply '{0}' function to "
"dimensionless quantities when other "
"argument is not a quantity (unless the "
"latter is all zero/infinity/nan)"
.format(function.__name__))
except TypeError:
                # can_have_arbitrary_unit failed: arg could not be compared
# with zero or checked to be finite. Then, ufunc will fail too.
raise TypeError("Unsupported operand type(s) for ufunc {0}: "
"'{1}' and '{2}'"
.format(function.__name__,
args[0].__class__.__name__,
args[1].__class__.__name__))
# In the case of np.power and np.float_power, the unit itself needs to
# be modified by an amount that depends on one of the input values,
# so we need to treat this as a special case.
# TODO: find a better way to deal with this.
if result_unit is False:
if units[0] is None or units[0] == dimensionless_unscaled:
result_unit = dimensionless_unscaled
else:
if units[1] is None:
p = args[1]
else:
p = args[1].to(dimensionless_unscaled).value
try:
result_unit = units[0] ** p
except ValueError as exc:
# Changing the unit does not work for, e.g., array-shaped
# power, but this is OK if we're (scaled) dimensionless.
try:
converters[0] = units[0]._get_converter(
dimensionless_unscaled)
except UnitConversionError:
raise exc
else:
result_unit = dimensionless_unscaled
else: # methods for which the unit should stay the same
if method == 'at':
unit = getattr(args[0], 'unit', None)
units = [unit]
if function.nin == 2:
units.append(getattr(args[2], 'unit', None))
converters, result_unit = UFUNC_HELPERS[function](function, *units)
# ensure there is no 'converter' for indices (2nd argument)
converters.insert(1, None)
elif (method in ('reduce', 'accumulate', 'reduceat') and
function.nin == 2):
unit = getattr(args[0], 'unit', None)
converters, result_unit = UFUNC_HELPERS[function](function,
unit, unit)
converters = converters[:1]
if method == 'reduceat':
# add 'scale' for indices (2nd argument)
converters += [None]
else:
if method in ('reduce', 'accumulate', 'reduceat',
'outer') and function.nin != 2:
raise ValueError("{0} only supported for binary functions"
.format(method))
raise TypeError("Unexpected ufunc method {0}. If this should "
"work, please raise an issue on"
"https://github.com/astropy/astropy"
.format(method))
# for all but __call__ method, scaling is not allowed
if unit is not None and result_unit is None:
raise TypeError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as the result is not a "
"Quantity.".format(function.__name__, method))
if converters[0] is not None or (unit is not None and
(not result_unit.is_equivalent(unit) or
result_unit.to(unit) != 1.)):
raise UnitsError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as it would change the unit."
.format(function.__name__, method))
return converters, result_unit
def check_output(output, unit, inputs, function=None):
"""Check that function output can be stored in the output array given.
Parameters
----------
output : array or `~astropy.units.Quantity` or tuple
Array that should hold the function output (or tuple of such arrays).
unit : `~astropy.units.Unit` or None, or tuple
Unit that the output will have, or `None` for pure numbers (should be
tuple of same if output is a tuple of outputs).
inputs : tuple
Any input arguments. These should be castable to the output.
function : callable
The function that will be producing the output. If given, used to
give a more informative error message.
Returns
-------
arrays : `~numpy.ndarray` view of ``output`` (or tuple of such views).
Raises
------
UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
TypeError : If the ``inputs`` cannot be cast safely to ``output``.
"""
if isinstance(output, tuple):
return tuple(check_output(output_, unit_, inputs, function)
for output_, unit_ in zip(output, unit))
# ``None`` indicates no actual array is needed. This can happen, e.g.,
# with np.modf(a, out=(None, b)).
if output is None:
return None
if hasattr(output, '__quantity_subclass__'):
# Check that we're not trying to store a plain Numpy array or a
# Quantity with an inconsistent unit (e.g., not angular for Angle).
if unit is None:
raise TypeError("Cannot store non-quantity output{0} in {1} "
"instance".format(
(" from {0} function".format(function.__name__)
if function is not None else ""),
type(output)))
if output.__quantity_subclass__(unit)[0] is not type(output):
raise UnitTypeError(
"Cannot store output with unit '{0}'{1} "
"in {2} instance. Use {3} instance instead."
.format(unit, (" from {0} function".format(function.__name__)
if function is not None else ""), type(output),
output.__quantity_subclass__(unit)[0]))
# Turn into ndarray, so we do not loop into array_wrap/array_ufunc
# if the output is used to store results of a function.
output = output.view(np.ndarray)
else:
# output is not a Quantity, so cannot attain a unit.
if not (unit is None or unit is dimensionless_unscaled):
raise UnitTypeError("Cannot store quantity with dimension "
"{0}in a non-Quantity instance."
.format("" if function is None else
"resulting from {0} function "
.format(function.__name__)))
# check we can handle the dtype (e.g., that we are not int
# when float is required).
if not np.can_cast(np.result_type(*inputs), output.dtype,
casting='same_kind'):
raise TypeError("Arguments cannot be cast safely to inplace "
"output with dtype={0}".format(output.dtype))
return output
|
[
"dlcheng@andromeda-32.ics.uci.edu"
] |
dlcheng@andromeda-32.ics.uci.edu
|
f0f45357153653337e8856a9543794329435a92f
|
50dd2a43daa8316fc11e0c176b5872738fcc5dde
|
/Learning/130_Fluent_Python/fp2-utf8/freeinteractive/freeinteractive 124.py
|
42249e691553393de2f30d1ce5dfeb475bd97059
|
[] |
no_license
|
FrenchBear/Python
|
58204d368e3e72071eef298ff00d06ff51bd7914
|
b41ab4b6a59ee9e145ef2cd887a5fe306973962b
|
refs/heads/master
| 2023-08-31T18:43:37.792427
| 2023-08-26T15:53:20
| 2023-08-26T15:53:20
| 124,466,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23
|
py
|
>>> my_object.foo += 1
|
[
"FrenchBear38@outlook.com"
] |
FrenchBear38@outlook.com
|
09fd080454a36295d593ea713a30fc521a400773
|
6f05485bc941a3d51383cdec2c3f886021565f88
|
/test_app.py
|
8012684ac1294b1bd66aa48023cfd6496f066de3
|
[] |
no_license
|
lfbatista/postcodes
|
bec1b8a2bfbcf0ed553957ec622e34934cc7287c
|
6793fbc4029f3770fb11a2057703acbb8cf0ca18
|
refs/heads/master
| 2022-11-12T15:18:15.084628
| 2020-06-25T20:18:13
| 2020-06-25T20:18:13
| 273,566,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
import unittest
from app import get_stores
class TestApp(unittest.TestCase):
def setUp(self):
self.get_stores = get_stores
def test_get_stores(self):
""" :return a list of stores in a given radius of a given postcode sorted from north to south """
expected_output = [{'name': 'Hatfield Central 5JJ', 'postcode': 'AL9 5JJ'},
{'name': 'Hatfield Central 5JP', 'postcode': 'AL9 5JP'},
{'name': 'Hatfield Central 5JY', 'postcode': 'AL9 5JY'}]
self.assertEqual(self.get_stores('AL9 5JP', '150'), expected_output)
if __name__ == '__main__':
unittest.main()
|
[
"amail@mail.com"
] |
amail@mail.com
|
704328caaf953ac8d1422fd6635417ec38f29df1
|
d5552cda58e251e6a5983876681be8f641dea86f
|
/tests/models/layoutlmv2/test_processor_layoutlmv2.py
|
c1fdde7d7ccc98c5b3468f44993de239fafaa4ed
|
[
"Apache-2.0"
] |
permissive
|
patrickvonplaten/transformers
|
feb121e1ee82c317ac7561836b8f95a7de25fc1f
|
f738502979f6787609dcf0180e6606f464692e27
|
refs/heads/master
| 2022-12-08T10:15:34.743198
| 2022-11-22T11:00:20
| 2022-11-22T11:00:20
| 226,201,271
| 6
| 1
|
Apache-2.0
| 2019-12-05T22:39:46
| 2019-12-05T22:39:45
| null |
UTF-8
|
Python
| false
| false
| 25,057
|
py
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
from typing import List
import numpy as np
from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast
from transformers.models.layoutlmv2 import LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast
from transformers.models.layoutlmv2.tokenization_layoutlmv2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pytesseract, require_tokenizers, require_torch, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, cached_property, is_pytesseract_available
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv2FeatureExtractor, LayoutLMv2Processor
@require_pytesseract
@require_tokenizers
class LayoutLMv2ProcessorTest(unittest.TestCase):
tokenizer_class = LayoutLMv2Tokenizer
rust_tokenizer_class = LayoutLMv2TokenizerFast
def setUp(self):
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
feature_extractor_map = {
"do_resize": True,
"size": 224,
"apply_ocr": True,
}
self.tmpdirname = tempfile.mkdtemp()
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(feature_extractor_map) + "\n")
def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
def get_feature_extractor(self, **kwargs):
return LayoutLMv2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_default(self):
feature_extractor = self.get_feature_extractor()
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
processor.save_pretrained(self.tmpdirname)
processor = LayoutLMv2Processor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast))
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = LayoutLMv2Processor(feature_extractor=self.get_feature_extractor(), tokenizer=self.get_tokenizer())
processor.save_pretrained(self.tmpdirname)
# slow tokenizer
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
processor = LayoutLMv2Processor.from_pretrained(
self.tmpdirname, use_fast=False, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, LayoutLMv2Tokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor)
# fast tokenizer
tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
processor = LayoutLMv2Processor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, LayoutLMv2TokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor)
def test_model_input_names(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = LayoutLMv2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
# add extra args
inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
@slow
def test_overflowing_tokens(self):
# In the case of overflowing tokens, test that we still have 1-to-1 mapping between the images and input_ids (sequences that are too long are broken down into multiple sequences).
from datasets import load_dataset
# set up
datasets = load_dataset("nielsr/funsd")
processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")
def preprocess_data(examples):
images = [Image.open(path).convert("RGB") for path in examples["image_path"]]
words = examples["words"]
boxes = examples["bboxes"]
word_labels = examples["ner_tags"]
encoded_inputs = processor(
images,
words,
boxes=boxes,
word_labels=word_labels,
padding="max_length",
truncation=True,
return_overflowing_tokens=True,
stride=50,
return_offsets_mapping=True,
return_tensors="pt",
)
return encoded_inputs
train_data = preprocess_data(datasets["train"])
self.assertEqual(len(train_data["image"]), len(train_data["input_ids"]))
# different use cases tests
@require_torch
@require_pytesseract
class LayoutLMv2ProcessorIntegrationTests(unittest.TestCase):
@cached_property
def get_images(self):
# we verify our implementation on 2 document images from the DocVQA dataset
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
image_1 = Image.open(ds[0]["file"]).convert("RGB")
image_2 = Image.open(ds[1]["file"]).convert("RGB")
return image_1, image_2
@cached_property
def get_tokenizers(self):
slow_tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
fast_tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
return [slow_tokenizer, fast_tokenizer]
@slow
def test_processor_case_1(self):
# case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True
feature_extractor = LayoutLMv2FeatureExtractor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
input_feat_extract = feature_extractor(images[0], return_tensors="pt")
input_processor = processor(images[0], return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify image
self.assertAlmostEqual(
input_feat_extract["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2
)
# verify input_ids
# this was obtained with Tesseract 4.1.1
# fmt: off
expected_decoding = "[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # noqa: E231
# fmt: on
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
input_feat_extract = feature_extractor(images, return_tensors="pt")
input_processor = processor(images, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify images
self.assertAlmostEqual(
input_feat_extract["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2
)
# verify input_ids
# this was obtained with Tesseract 4.1.1
# fmt: off
expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc ’ s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc ’ s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc ’ s brands national assets, adding to india ’ s competitiveness. it is itc ’ s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : https : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]" # noqa: E231
# fmt: on
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
@slow
def test_processor_case_2(self):
# case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt")
# verify keys
expected_keys = ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"]
actual_keys = list(input_processor.keys())
for key in expected_keys:
self.assertIn(key, actual_keys)
# verify input_ids
expected_decoding = "[CLS] hello world [SEP]"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] hello world [SEP] [PAD] [PAD] [PAD]"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_3(self):
# case 3: token classification (training), apply_ocr=False
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
words = ["weirdly", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
word_labels = [1, 2]
input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] weirdly world [SEP]"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify labels
expected_labels = [-100, 1, -100, 2, -100]
self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
word_labels = [[1, 2], [6, 3, 10, 2]]
input_processor = processor(
images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] my name is niels [SEP]"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
# verify labels
expected_labels = [-100, 6, 3, 10, 2, -100, -100]
self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)
@slow
def test_processor_case_4(self):
# case 4: visual question answering (inference), apply_ocr=True
feature_extractor = LayoutLMv2FeatureExtractor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
input_processor = processor(images[0], question, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
# fmt: off
expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # noqa: E231
# fmt: on
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
input_processor = processor(
images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "[CLS] what's the time [SEP] 7 itc limited report and accounts 2013 itc ’ s [SEP]"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
# fmt: off
expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]] # noqa: E231
# fmt: on
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_5(self):
# case 5: visual question answering (inference), apply_ocr=False
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], question, words, boxes, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] what's his name? [SEP] hello world [SEP]"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] how old is he? [SEP] hello world [SEP] [PAD] [PAD] [PAD]"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
expected_decoding = "[CLS] what's the time [SEP] my name is niels [SEP]"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
|
[
"noreply@github.com"
] |
patrickvonplaten.noreply@github.com
|
5dc3ba5320c424f0a21bb4c6581942a0eecd817f
|
7d66a8382000bf84af00c6154d35b5fc37eeb20f
|
/torch_train/trainer.py
|
4f0d7cd576519e0e243b69d86182f46bd553123f
|
[] |
no_license
|
thisissum/emo_identification
|
54c507d692c048b84623c03d7a0ff64d2168a84f
|
20fdf41351061b0771a8ada7f782461370414650
|
refs/heads/master
| 2022-11-09T01:29:22.526057
| 2020-07-01T01:40:12
| 2020-07-01T01:40:12
| 276,248,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,649
|
py
|
import torch
from torch import nn
import numpy as np
import random
from tqdm import tqdm
from collections import deque
# TODO: distributed(): set to multi-gpu
# TODO: add logger
def move_to_device(data, device):
    """Recursively move a tensor, or a list/tuple/dict of tensors, to `device`."""
    if isinstance(data, torch.Tensor):
        return data.to(device)
    if isinstance(data, (list, tuple)):
        return type(data)(move_to_device(item, device) for item in data)
    if isinstance(data, dict):
        return {key: move_to_device(value, device) for key, value in data.items()}
    return data
class Trainer(object):
def __init__(self, device=None, verbose=True, name='trainer'):
self.device = device if device else \
torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.name = name
self.verbose = verbose
self.setup = False
def build(self, model, optimizer, criterion, callbacks=None, metric=None):
self.model = model.to(self.device)
self.optimizer = optimizer
self.criterion = criterion
self.callbacks = callbacks
self.metric = metric
self.setup = True
def fit(self, train_loader, num_epoch, validate_loader=None):
# check everything
# TODO: add check module or function to replace this
        assert self.setup, 'You should call build() before fit()'
        if any([validate_loader, self.metric]):
            assert all([validate_loader, self.metric]), \
                'validate_loader and metric must be provided together'
# set verbose using tqdm
        if self.verbose:
train_loader = tqdm(train_loader)
validate_loader = tqdm(validate_loader) if validate_loader else None
# train loop
for epoch in range(num_epoch):
loss = []
for i, data in enumerate(train_loader):
# TODO: use Summarizer to record info, not print
                train_info = self._train_step(data)
loss.append(train_info['loss'])
                print('epoch {}, step {}, current train loss: {}'.format(epoch + 1, i + 1, np.mean(loss)))
# validate loop
if validate_loader:
# TODO: find a more elegant way to pack this part
early_stop = False
with torch.no_grad():
self.metric.clear()
for data in validate_loader:
info = self._validate_step(data)
self.metric.display()
if self.callbacks:
for callback in self.callbacks:
try:
callback.step(self.metric.cur_metric)
except StopIteration:
early_stop = True
checkpoint_path = callback.path
if early_stop:
self.model.load_state_dict(torch.load(checkpoint_path))
print('early stop at epoch {}'.format(epoch))
break
def _train_step(self, data):
# forward batch
        data = move_to_device(data, self.device)
y_pred, y_true = self.forward_batch(data)
# compute loss
loss = self.criterion(y_pred, y_true)
# update parameters
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# summary info in train step
info = {'loss': loss.item()}
return info
def predict(self, test_loader):
output = []
with torch.no_grad():
for data in test_loader:
                data = move_to_device(data, self.device)
y_out = self.predict_batch(data)
output.append(y_out)
        # concatenate batch outputs along the batch dimension (batches may differ in size)
        return torch.cat(output, dim=0)
def _validate_step(self, data):
        data = move_to_device(data, self.device)
y_pred, y_true = self.forward_batch(data)
self.metric.update_state(y_pred, y_true)
info = {'metric': self.metric.cur_metric}
return info
def forward_batch(self, data):
pass
def predict_batch(self, data):
pass
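# Illustrative usage sketch (``MyModel`` and ``train_loader`` are hypothetical
# names, not part of this module): subclass Trainer and implement the two
# batch hooks, then build and fit.
#
#     class ClassifierTrainer(Trainer):
#         def forward_batch(self, data):
#             inputs, labels = data
#             return self.model(inputs), labels
#         def predict_batch(self, data):
#             inputs, _ = data
#             return self.model(inputs)
#
#     model = MyModel()
#     trainer = ClassifierTrainer(name='classifier')
#     trainer.build(model, torch.optim.Adam(model.parameters()),
#                   nn.CrossEntropyLoss())
#     trainer.fit(train_loader, num_epoch=10)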
|
[
"ThisIsSunMH@gmail.com"
] |
ThisIsSunMH@gmail.com
|
f05a92dd67fd9668718e353680d935172e6db74e
|
c9a0ce3a60e5dba8edcb89e2d710ab5503e7e77a
|
/webtest/settings.py
|
12fa052f0631b36f20dd410d828b37b523386ad4
|
[] |
no_license
|
Talgin/tgt_blog
|
cfa7f3345eb647d5f4ce44f2ef0e7434f8bc7599
|
42565d8e9735f05bb4279cbdab66d58eb1400537
|
refs/heads/master
| 2022-07-16T05:15:26.980964
| 2020-05-13T12:48:04
| 2020-05-13T12:48:04
| 263,610,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,231
|
py
|
"""
Django settings for webtest project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&m7plgf$9wx(*w98*gtp9^ahta*j+0g*mlzen&_z32to$3p!u^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webtest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webtest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
# LANGUAGE_CODE = 'kk'
TIME_ZONE = 'Asia/Almaty'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"talgat90.07@gmail.com"
] |
talgat90.07@gmail.com
|
9e00d81faae31bf4f5fdc1e3f30ff046128cfc9f
|
37c414d5f5eab3e5063f09e0bd2a0510dc554264
|
/regression/week5/week-5-lasso-assignment-1.py
|
6ac7417fd385719e460691c6c7cb6d0591d7f0f9
|
[] |
no_license
|
obh/coursera
|
17981934bc9c6a47c6c57b7b772594c6c7225065
|
880a8aa960e034dad986617158ba4095ba6b2d98
|
refs/heads/master
| 2021-01-10T05:50:50.937061
| 2016-03-28T06:39:45
| 2016-03-28T06:39:45
| 51,063,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,535
|
py
|
# coding: utf-8
# # Regression Week 5: Feature Selection and LASSO (Interpretation)
# In this notebook, you will use LASSO to select features, building on a pre-implemented solver for LASSO (using GraphLab Create, though you can use other solvers). You will:
# * Run LASSO with different L1 penalties.
# * Choose best L1 penalty using a validation set.
# * Choose best L1 penalty using a validation set, with additional constraint on the size of subset.
#
# In the second notebook, you will implement your own LASSO solver, using coordinate descent.
# # Fire up graphlab create
# In[1]:
import graphlab
# # Load in house sales data
#
# Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
# In[2]:
sales = graphlab.SFrame('kc_house_data.gl/')
# # Create new features
# As in Week 2, we consider features that are some transformations of inputs.
# In[3]:
from math import log, sqrt
sales['sqft_living_sqrt'] = sales['sqft_living'].apply(sqrt)
sales['sqft_lot_sqrt'] = sales['sqft_lot'].apply(sqrt)
sales['bedrooms_square'] = sales['bedrooms']*sales['bedrooms']
# In the dataset, 'floors' was defined with type string,
# so we'll convert them to float, before creating a new feature.
sales['floors'] = sales['floors'].astype(float)
sales['floors_square'] = sales['floors']*sales['floors']
# * Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this variable will mostly affect houses with many bedrooms.
# * On the other hand, taking the square root of sqft_living will decrease the separation between a big house and a small house. The owner may not be exactly twice as happy about getting a house that is twice as big.
# # Learn regression weights with L1 penalty
# Let us fit a model with all the features available, plus the features we just created above.
# In[5]:
all_features = ['bedrooms', 'bedrooms_square',
'bathrooms',
'sqft_living', 'sqft_living_sqrt',
'sqft_lot', 'sqft_lot_sqrt',
'floors', 'floors_square',
'waterfront', 'view', 'condition', 'grade',
'sqft_above',
'sqft_basement',
'yr_built', 'yr_renovated']
# Applying L1 penalty requires adding an extra parameter (`l1_penalty`) to the linear regression call in GraphLab Create. (Other tools may have separate implementations of LASSO.) Note that it's important to set `l2_penalty=0` to ensure we don't introduce an additional L2 penalty.
# In[6]:
model_all = graphlab.linear_regression.create(sales, target='price', features=all_features,
validation_set=None,
l2_penalty=0., l1_penalty=1e10)
# Find what features had non-zero weight.
# In[9]:
model_all.coefficients.print_rows(num_rows=18)
# Note that a majority of the weights have been set to zero. So by setting an L1 penalty that's large enough, we are performing a subset selection.
#
# ***QUIZ QUESTION***:
# According to this list of weights, which of the features have been chosen?
# # Selecting an L1 penalty
# To find a good L1 penalty, we will explore multiple values using a validation set. Let us do three way split into train, validation, and test sets:
# * Split our sales data into 2 sets: training and test
# * Further split our training data into two sets: train, validation
#
# Be *very* careful that you use seed = 1 to ensure you get the same answer!
# In[10]:
(training_and_validation, testing) = sales.random_split(.9,seed=1) # initial train/test split
(training, validation) = training_and_validation.random_split(0.5, seed=1) # split training into train and validate
# Next, we write a loop that does the following:
# * For `l1_penalty` in [10^1, 10^1.5, 10^2, 10^2.5, ..., 10^7] (to get this in Python, type `np.logspace(1, 7, num=13)`.)
# * Fit a regression model with a given `l1_penalty` on TRAIN data. Specify `l1_penalty=l1_penalty` and `l2_penalty=0.` in the parameter list.
# * Compute the RSS on VALIDATION data (here you will want to use `.predict()`) for that `l1_penalty`
# * Report which `l1_penalty` produced the lowest RSS on validation data.
#
# When you call `linear_regression.create()` make sure you set `validation_set = None`.
#
# Note: you can turn off the print out of `linear_regression.create()` with `verbose = False`
# In[15]:
import numpy as np
l1_penalty_arr = np.logspace(1, 7, num=13)
def getRSS(prediction, actual):
diff = prediction - actual
diff_squared = diff.apply(lambda x : x*x)
diff_squared_sum = diff_squared.sum()
return diff_squared_sum
MIN = float('inf')
l1_penalty_min = -1
for l1_penalty in l1_penalty_arr:
model = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0., l1_penalty=l1_penalty)
RSS = getRSS(model.predict(validation), validation['price'])
print str(l1_penalty) + " " + str(RSS)
if RSS < MIN:
MIN = RSS
l1_penalty_min = l1_penalty
print str(l1_penalty_min) + " " + str(MIN)
# In[16]:
model = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0., l1_penalty=10.0)
print str(getRSS(model.predict(testing), testing['price']))
# *** QUIZ QUESTIONS ***
# 1. What was the best value for the `l1_penalty`?
# 2. What is the RSS on TEST data of the model with the best `l1_penalty`?
# In[18]:
model.coefficients.print_rows(num_rows=18)
# ***QUIZ QUESTION***
# Also, using this value of L1 penalty, how many nonzero weights do you have?
# In[ ]:
# # Limit the number of nonzero weights
#
# What if we absolutely wanted to limit ourselves to, say, 7 features? This may be important if we want to derive "a rule of thumb" --- an interpretable model that has only a few features in them.
# In this section, you are going to implement a simple, two-phase procedure to achieve this goal:
# 1. Explore a large range of `l1_penalty` values to find a narrow region of `l1_penalty` values where models are likely to have the desired number of non-zero weights.
# 2. Further explore the narrow region you found to find a good value for `l1_penalty` that achieves the desired sparsity. Here, we will again use a validation set to choose the best value for `l1_penalty`.
# In[63]:
max_nonzeros = 7
# ## Exploring the larger range of values to find a narrow range with the desired sparsity
#
# Let's define a wide range of possible `l1_penalty_values`:
# In[62]:
l1_penalty_values = np.logspace(8, 10, num=20)
# Now, implement a loop that search through this space of possible `l1_penalty` values:
#
# * For `l1_penalty` in `np.logspace(8, 10, num=20)`:
# * Fit a regression model with a given `l1_penalty` on TRAIN data. Specify `l1_penalty=l1_penalty` and `l2_penalty=0.` in the parameter list. When you call `linear_regression.create()` make sure you set `validation_set = None`
# * Extract the weights of the model and count the number of nonzeros. Save the number of nonzeros to a list.
# * *Hint: `model['coefficients']['value']` gives you an SArray with the parameters you learned. If you call the method `.nnz()` on it, you will find the number of non-zero parameters!*
# In[64]:
nnz_arr = {}
for l1_penalty in l1_penalty_values:
model = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0., l1_penalty=l1_penalty)
nnz_arr[l1_penalty] = model['coefficients']['value'].nnz()
l1_penalty_min = max( filter(lambda x: nnz_arr[x] > max_nonzeros, nnz_arr.keys()) )
l1_penalty_max = min( filter(lambda x: nnz_arr[x] < max_nonzeros, nnz_arr.keys()) )
print l1_penalty_min
print l1_penalty_max
# Out of this large range, we want to find the two ends of our desired narrow range of `l1_penalty`. At one end, we will have `l1_penalty` values that have too few non-zeros, and at the other end, we will have an `l1_penalty` that has too many non-zeros.
#
# More formally, find:
# * The largest `l1_penalty` that has more non-zeros than `max_nonzero` (if we pick a penalty smaller than this value, we will definitely have too many non-zero weights)
# * Store this value in the variable `l1_penalty_min` (we will use it later)
# * The smallest `l1_penalty` that has fewer non-zeros than `max_nonzero` (if we pick a penalty larger than this value, we will definitely have too few non-zero weights)
# * Store this value in the variable `l1_penalty_max` (we will use it later)
#
#
# *Hint: there are many ways to do this, e.g.:*
# * Programmatically within the loop above
# * Creating a list with the number of non-zeros for each value of `l1_penalty` and inspecting it to find the appropriate boundaries.
# ***QUIZ QUESTIONS***
#
# What values did you find for `l1_penalty_min` and`l1_penalty_max`?
# ## Exploring the narrow range of values to find the solution with the right number of non-zeros that has lowest RSS on the validation set
#
# We will now explore the narrow region of `l1_penalty` values we found:
# In[54]:
l1_penalty_values = np.linspace(l1_penalty_min,l1_penalty_max,20)
# * For `l1_penalty` in `np.linspace(l1_penalty_min,l1_penalty_max,20)`:
# * Fit a regression model with a given `l1_penalty` on TRAIN data. Specify `l1_penalty=l1_penalty` and `l2_penalty=0.` in the parameter list. When you call `linear_regression.create()` make sure you set `validation_set = None`
# * Measure the RSS of the learned model on the VALIDATION set
#
# Find the model that has the lowest RSS on the VALIDATION set and sparsity *equal* to `max_nonzeros`.
# In[57]:
MIN = float('inf')
min_l1_penalty = -1
for l1_penalty in np.linspace(l1_penalty_min,l1_penalty_max,20):
model = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0., l1_penalty=l1_penalty)
RSS = getRSS(model.predict(validation), validation['price'])
if RSS < MIN and model['coefficients']['value'].nnz() == max_nonzeros:
MIN = RSS
min_l1_penalty = l1_penalty
print str(min_l1_penalty)
# ***QUIZ QUESTIONS***
# 1. What value of `l1_penalty` in our narrow range has the lowest RSS on the VALIDATION set and has sparsity *equal* to `max_nonzeros`?
# 2. What features in this model have non-zero coefficients?
# In[60]:
model = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0., l1_penalty=3448968612.16)
model.coefficients.print_rows(num_rows=18)
# In[ ]:
|
[
"rsedwardian@gmail.com"
] |
rsedwardian@gmail.com
|
9e5945b82757e60f6524990a32b8dc5a0d66db01
|
ea318d69aa23ee4560a28abf34f057f3ae62ee3a
|
/pyroball/models/svi_dynamic.py
|
efc7d2a6e645b82ac91a5ce72e68e42ffdfc5f47
|
[] |
no_license
|
anguswilliams91/pyroball
|
e52450fad2d4898ef3192bd433d74d1c3b7ab0f6
|
39c642e20ac3ee5dc5cc75dfc783beb973ca8850
|
refs/heads/master
| 2020-11-24T08:53:48.806541
| 2019-12-31T13:22:56
| 2019-12-31T13:22:56
| 228,063,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,613
|
py
|
import pyro
from pyro import distributions as dist
from pyro.infer import Trace_ELBO, SVI, Predictive
from pyro.optim import Adam
from pyro.poutine import condition
from pyro.contrib.autoguide import (
AutoMultivariateNormal, AutoDiagonalNormal, AutoIAFNormal, AutoLaplaceApproximation
)
import torch
from pyroball.util import early_stopping
class SVIDynamicModel:
def __init__(self):
self.team_to_index = None
self.index_to_team = None
self.n_teams = None
self.samples = None
def model(self, home_team, away_team, gameweek):
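        # Team attack (log_a) and defence (log_b) strengths follow a Gaussian
        # random walk over gameweeks, built by cumulatively summing sampled diffs.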
n_gameweeks = max(gameweek) + 1
gamma = pyro.sample("gamma", dist.LogNormal(0, 1))
mu_b = pyro.sample("mu_b", dist.Normal(0, 1))
with pyro.plate("teams", self.n_teams):
log_a0 = pyro.sample("log_a0", dist.Normal(0, 1))
log_b0 = pyro.sample("log_b0", dist.Normal(mu_b, 1))
sigma_rw = pyro.sample("sigma_rw", dist.HalfNormal(0.1))
with pyro.plate("random_walk", n_gameweeks - 1):
diffs_a = pyro.sample("diff_a", dist.Normal(0, sigma_rw))
diffs_b = pyro.sample("diff_b", dist.Normal(0, sigma_rw))
log_a0_t = log_a0 if log_a0.dim() == 2 else log_a0[None, :]
diffs_a = torch.cat((log_a0_t, diffs_a), axis=0)
log_a = torch.cumsum(diffs_a, axis=0)
log_b0_t = log_b0 if log_b0.dim() == 2 else log_b0[None, :]
diffs_b = torch.cat((log_b0_t, diffs_b), axis=0)
log_b = torch.cumsum(diffs_b, axis=0)
pyro.sample("log_a", dist.Delta(log_a), obs=log_a)
pyro.sample("log_b", dist.Delta(log_b), obs=log_b)
home_inds = torch.tensor([self.team_to_index[team] for team in home_team])
away_inds = torch.tensor([self.team_to_index[team] for team in away_team])
home_rate = torch.clamp(
log_a[gameweek, home_inds] - log_b[gameweek, away_inds] + gamma, -7, 2
)
away_rate = torch.clamp(
log_a[gameweek, away_inds] - log_b[gameweek, home_inds], -7, 2
)
pyro.sample("home_goals", dist.Poisson(torch.exp(home_rate)))
pyro.sample("away_goals", dist.Poisson(torch.exp(away_rate)))
def fit(
self,
df,
max_iter=6000,
patience=200,
optimiser_settings={"lr": 1.0e-2},
elbo_kwargs={"num_particles": 5},
):
teams = sorted(list(set(df["home_team"]) | set(df["away_team"])))
home_team = df["home_team"].values
away_team = df["away_team"].values
home_goals = torch.tensor(df["home_goals"].values, dtype=torch.float32)
away_goals = torch.tensor(df["away_goals"].values, dtype=torch.float32)
gameweek = ((df["date"] - df["date"].min()).dt.days // 7).values
self.team_to_index = {team: i for i, team in enumerate(teams)}
self.index_to_team = {value: key for key, value in self.team_to_index.items()}
self.n_teams = len(teams)
self.min_date = df["date"].min()
conditioned_model = condition(
self.model, data={"home_goals": home_goals, "away_goals": away_goals}
)
guide = AutoDiagonalNormal(conditioned_model)
optimizer = Adam(optimiser_settings)
elbo = Trace_ELBO(**elbo_kwargs)
svi = SVI(conditioned_model, guide, optimizer, loss=elbo)
pyro.clear_param_store()
fitted_svi, losses = early_stopping(
svi, home_team, away_team, gameweek, max_iter=max_iter, patience=patience
)
self.guide = guide
return losses
def _predict(self, home_team, away_team, dates, num_samples=100, seed=42):
predictive = Predictive(
self.model,
guide=self.guide,
num_samples=num_samples,
return_sites=("home_goals", "away_goals"),
)
home_team = [home_team] if isinstance(home_team, str) else home_team
away_team = [away_team] if isinstance(away_team, str) else away_team
missing_teams = set(list(home_team) + list(away_team)) - set(self.team_to_index.keys())
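        # Give indices to teams that were not seen during training so they can be looked up below.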
for team in missing_teams:
new_index = max(self.team_to_index.values()) + 1
self.team_to_index[team] = new_index
self.index_to_team[new_index] = team
self.n_teams += 1
gameweek = (dates - self.min_date).dt.days // 7
predictions = predictive.get_samples(home_team, away_team, gameweek)
return (
predictions["home_goals"].detach().numpy(),
predictions["away_goals"].detach().numpy(),
)
|
[
"anguswilliams91@gmail.com"
] |
anguswilliams91@gmail.com
|
a0eb71c3bb7ac2b1ce80fd98687dd4b357826921
|
63bbe2f37dc3f9dd11e31c5f0dbcaee362bf069f
|
/weixitest/basic.py
|
e4a1cccf9cdbae0dc547afab22bd737cf39de142
|
[] |
no_license
|
yuxuefendou/weixin
|
a3809e86e066a5c14abf38c663834ccf2e4a43cb
|
73ebd84c0d8b68214d89cf1a53597544e926b8fc
|
refs/heads/master
| 2021-04-06T12:02:38.724598
| 2018-03-09T09:07:56
| 2018-03-09T09:07:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Created on 13:51 2017/11/4
@author: acer
'''
import urllib
import time
import json
class Basic:
def __init__(self):
self.__accessToken = ''
self.__leftTime = 0
    def __real_get_access_token(self):
# appId="wxf6e4bea81a14c7e4"
# appSecret = "58cd237a1abf8f1e775e8301dfd78e3e"
appId = "wxf4f421a98811617a"
appSecret = "5e5f459dc1b1998d58dc7f40188adaa6"
postUrl = ("https://api.weixin.qq.com/cgi-bin/token?grant_type="
"client_credential&appid=%s&secret=%s" % (appId, appSecret))
urlResp = urllib.urlopen(postUrl)
urlResp = json.loads(urlResp.read())
        # print the access_token
print urlResp
self.__accessToken = urlResp['access_token']
self.__leftTime = urlResp['expires_in']
def get_access_token(self):
if self.__leftTime<10:
            self.__real_get_access_token()
return self.__accessToken
def run(self):
while(True):
if self.__leftTime>10:
time.sleep(2)
self.__leftTime -=2
else:
                self.__real_get_access_token()
|
[
"1131419557@qq.com"
] |
1131419557@qq.com
|
9c0b88760d4b0038094db668b133cab22473a766
|
e3b34c15197f73f1e85dad715bf951be74b54721
|
/main/classes/ContentManager.py
|
99a79c74158f0ea82daaa012fe57256f00c341b0
|
[] |
no_license
|
Kingdomdark/ProjectOP2
|
5531a051fe7f256b3789b4065c93cff373b7070d
|
2b390bb8e02d23b3ca3a73edf1b7ff7b1d2e46e8
|
refs/heads/master
| 2021-01-11T20:53:38.109054
| 2017-02-01T18:11:26
| 2017-02-01T18:11:26
| 79,206,773
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
from .QuestionManager import questionmanager
from .MenuManager import menumanager
from .GameManager import gamemanager
from .TileManager import tilemanager
from .AudioVisualManager import spritemanager
class contentmanager:
def __init__(self,screen):
self.qm = questionmanager()
self.sm = spritemanager()
self.tm = tilemanager(screen,self.sm)
self.mm = menumanager(screen,self)
self.gm = gamemanager(screen,self,self.qm,self.tm)
self.stage = 0
def switchtogame(self):
self.stage = 1
def switchtomenu(self):
self.stage = 0
def update(self):
if self.stage == 0:
self.mm.update()
elif self.stage == 1:
self.gm.update()
def draw(self):
if self.stage == 0:
self.mm.draw()
elif self.stage == 1:
self.gm.draw()
|
[
"jmolendijk93@gmail.com"
] |
jmolendijk93@gmail.com
|
e20393aff022c9f8bb1cabe7899c8e008eba862b
|
394072f7fd3e2a226aeed78bf0a4f587f4c4e383
|
/Tree_gmb/get_name_dict.py
|
1f5dea360f5e10e3d7c5edcd4054f3d938068771
|
[] |
no_license
|
LeonCrashCode/DRSparsing
|
ec5cca079a2c73eb512444e1ac86215722e6503a
|
c7e92beb8878ff2386bc6789e6c17f0d35bf1277
|
refs/heads/master
| 2020-03-16T09:52:11.217219
| 2019-01-17T14:20:16
| 2019-01-17T14:20:16
| 124,549,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,991
|
py
|
import os
import sys
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True)
args = parser.parse_args()
v_p = re.compile("^[XESTPKB][0-9]+$")
d_p = re.compile("^DRS-[0-9]+\($")
pb = re.compile("^P[0-9]+\($")
kb = re.compile("^K[0-9]+\($")
def correct(tree):
	# Here we correct some weird things
#e.g. :( K1 K2 )
for i in range(len(tree)):
if tree[i] == ":(" and tree[i+1][0] == "K":
tree[i] = "THAT("
if tree[i] == "-(" and tree[i+1][0] == "K":
tree[i] = "THAT("
if tree[i] == "((" and tree[i+1][0] == "K":
tree[i] = "THAT("
def bracket2list(bracket):
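	# Convert a flat bracketed token sequence into a nested tree (list of lists).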
stack = []
for tok in bracket:
if tok[-1] == "(":
stack.append([tok])
elif tok == ")":
if len(stack) != 1:
back = stack.pop()
stack[-1].append(back)
else:
stack[-1].append(tok)
assert len(stack) == 1
return stack[0]
def tree2ground(tree):
v = ["X","E","S","T","B","P","K"]
vl = [ [] for i in range(7)]
for i in range(len(tree)):
tok = tree[i]
if pb.match(tok):
assert tok[:-1] not in vl[-2]
vl[-2].append(tok[:-1])
assert v_p.match(tree[i+1]) and tree[i+1][0] == "B"
if tree[i+1] != "B0" and tree[i+1] not in vl[-3]:
vl[-3].append(tree[i+1])
if kb.match(tok):
assert tok[:-1] not in vl[-1]
vl[-1].append(tok[:-1])
if tok in ["NOT(", "POS(", "NEC(", "IMP(", "OR(", "DUP("]:
assert v_p.match(tree[i+1]) and tree[i+1][0] == "B"
if tree[i+1] != "B0" and tree[i+1] not in vl[-3]:
vl[-3].append(tree[i+1])
#print vl
#exit(1)
root = bracket2list(tree)
def is_struct(tok):
if tok in ["DRS(", "SDRS(", "NOT(", "POS(", "NEC(", "IMP(", "OR(", "DUP("]:
return True
if d_p.match(tok):
return True
if re.match("^[PK][0-9]+\($", tok):
return True
return False
def travel(root):
#global v
#global vl
parent = root[0]
child = root[1:]
if parent == "SDRS(":
for c in child:
if not is_struct(c[0]):
for cc in c[1:]:
if v_p.match(cc):
idx = v.index(cc[0])
if cc not in vl[idx]:
vl[idx].append(cc)
for c in child:
if is_struct(c[0]):
travel(c)
elif parent == "DRS(" or d_p.match(parent):
for c in child:
if not is_struct(c[0]):
assert c[1][0] == "B"
assert v_p.match(c[1]) and c[1][0] == "B"
idx = v.index(c[1][0])
if c[1] not in vl[idx] and c[1] != "B0":
vl[idx].append(c[1])
for cc in c[2:]:
if v_p.match(cc):
idx = v.index(cc[0])
if cc not in vl[idx]:
vl[idx].append(cc)
for c in child:
if is_struct(c[0]):
travel(c)
elif pb.match(parent) or parent in ["NOT(", "POS(", "NEC(", "IMP(", "OR(", "DUP("]:
for c in child[1:]:
travel(c)
elif kb.match(parent):
for c in child:
travel(c)
travel(root)
correct(tree)
# normalize variables
i = 0
cur = 0
while i < len(tree):
tok = tree[i]
if pb.match(tok):
idx = vl[-2].index(tok[:-1])
tree[i] = "P"+str(idx+1)+"("
elif kb.match(tok):
idx = vl[-1].index(tok[:-1])
tree[i] = "K"+str(idx+1)+"("
elif v_p.match(tok) and tok != "B0":
vl_idx = v.index(tok[0])
idx = vl[vl_idx].index(tok)
tree[i] = v[vl_idx] + str(idx+1)
i += 1
# normalize scoped K and P
p_n = 0
k_n = 0
for i in range(len(tree)):
tok = tree[i]
if pb.match(tok):
assert int(tok[1:-1]) == p_n + 1
#tree[i] = "@P("
p_n += 1
if kb.match(tok):
assert int(tok[1:-1]) == k_n + 1
#tree[i] = "@K("
k_n += 1
#print tree
n_tree = []
stack = []
i = 0
while i < len(tree):
t = tree[i]
if v_p.match(t):
#n_tree.append(t)
i += 1
elif t in ["DRS(", "SDRS("]:
#n_tree.append(t)
stack.append(-1)
i += 1
elif d_p.match(t):
#n_tree.append(t)
stack.append(int(t[4:-1])) #DRS-10(
i += 1
elif pb.match(t) or kb.match(t):
#n_tree.append(t)
stack.append(-1)
i += 1
elif t == ")":
#n_tree.append(t)
stack.pop()
i += 1
elif t in ["NOT(", "POS(", "NEC(", "IMP(", "OR(", "DUP("]:
#n_tree.append(t)
stack.append(-1)
i += 1
elif t == "Named(": #Named( B0 X1 $1[John] )
assert stack[-1] != -1
idx = tree[i:].index(")")
n_tree.append(t)
n_tree.append(tree[i+1])
n_tree.append(tree[i+2])
exps = []
cons = []
for item in tree[i+3:i+idx]:
assert re.match("^\$[0-9]+\[.+\]$", item)
j = item.index("[")
exps.append(item[j+1:-1])
cons.append(words[stack[-1]][int(item[1:j])])
exp = "~".join(exps)
cons = "~".join(cons)
if cons not in name_dict:
name_dict[cons] = [exp]
elif exp not in name_dict[cons]:
name_dict[cons].append(exp)
i = i + idx + 1
elif t == "Card(":
assert stack[-1] != -1
idx = tree[i:].index(")")
i = i + idx + 1
elif re.match("^T[yx][mx][dx]\($", t):
assert stack[-1] != -1
idx = tree[i:].index(")")
i = i + idx + 1
elif t[-1] == "(":
idx = tree[i:].index(")")
i = i + idx + 1
else:
assert False, "unrecognized format"
def filter(illform, tree):
	# filter out overly long sentences (in practice only one); that length check is disabled below
"""
cnt = 0
for item in tree:
if item == "DRS(":
cnt += 1
if cnt >= 21:
return True
"""
for item in illform:
if item in tree:
return True
return False
if __name__ == "__main__":
illform = []
if os.path.exists("manual_correct2"):
for line in open("manual_correct2"):
line = line.strip()
if line == "" or line[0] == "#":
continue
illform.append(line.split()[0])
global words
global name_dict
name_dict = {}
lines = []
filename = ""
for line in open(args.input):
line = line.strip()
if line == "":
idx = lines.index("TREE")
words = lines[:idx]
for i in range(len(words)):
words[i] = words[i].split()
tree = lines[idx+1].split()
if filter(illform, tree):
lines = []
continue
tree2ground(tree)
lines = []
else:
if line[0] == "#":
filename = line.split()[-1]
continue
lines.append(line)
keys = name_dict.keys()
keys.sort()
for key in keys:
print key, name_dict[key], len(name_dict[key])
|
[
"jiangming_liu@JM-MacBook.local"
] |
jiangming_liu@JM-MacBook.local
|
45ec02892820ec21cd41b25d8e7086dab47d0908
|
61eaefeba9eb3e1b8fbbc72e6ab76c2bdf8d1cd5
|
/基于规则模型/cos.py
|
11f305c82ccc30bd4b2e9fb71a77f8effc8590fb
|
[] |
no_license
|
wutonghua/fenlei
|
f8b9ce00181623ba17bec799d5223f768a4c69ff
|
40bca4e360ee68db929386fc8522abe511d08b62
|
refs/heads/master
| 2021-04-15T12:33:41.114718
| 2018-03-23T01:49:16
| 2018-03-23T01:49:16
| 126,417,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy
def moreCos(vec1,vec2):
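    # cosine distance: 1 - cosine similarity of the two vectors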
num = float(numpy.sum(vec1 * vec2))
denom = numpy.linalg.norm(vec1) * numpy.linalg.norm(vec2)
cos = num / denom
return 1-cos
def ComputerNearestNeighbor(line_xl,pipei_xl):
distance=[]
for i in range(len(pipei_xl)):
dist = moreCos(numpy.array(line_xl), numpy.array(pipei_xl[i]))
distance.append((dist,i))
distance.sort()
return distance
|
[
"382291189@qq.com"
] |
382291189@qq.com
|
59c40a0fbcaea3a03fba7f05f4950f4b98fdca8c
|
519c809b9c1384f95b04069d73857d42e83e5336
|
/t1.py
|
24dd66bd8e989fb4e3fc89562a88df8498d23fc4
|
[] |
no_license
|
indranilpradhan/Recidivism
|
79eb536dba1569132f6d731e7d00076dcc12c88d
|
7fdf3f6e0e778dfcd8315935eef9925e9ffa9558
|
refs/heads/main
| 2023-02-02T05:04:13.861220
| 2020-12-21T11:50:16
| 2020-12-21T11:50:16
| 323,320,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
import os
import pandas as pd
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import StackingClassifier
TRAIN_DATA_PATH = os.getenv("TRAIN_DATA_PATH")
TEST_DATA_PATH = os.getenv("TEST_DATA_PATH")
train_data = pd.read_csv(TRAIN_DATA_PATH)
X_train, y_train = train_data.iloc[:,:-1], train_data.iloc[:,-1]
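# Stacking ensemble: random forest and logistic regression as base learners, an SVM as the meta-learner.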
level_0 = list()
level_0.append(('RF', RandomForestClassifier(n_estimators=700)))
level_0.append(('LR',LogisticRegression(max_iter=6000)))
level_1 = SVC(C=1.2)
model = StackingClassifier(estimators=level_0, final_estimator=level_1, cv=4)
model.fit(X_train, y_train)
test_data = pd.read_csv(TEST_DATA_PATH)
submission = model.predict(test_data)
submission = pd.DataFrame(submission)
submission.to_csv('submission.csv', header=['class'], index=False)
|
[
"noreply@github.com"
] |
indranilpradhan.noreply@github.com
|
69ce06a042be86047ac1e130738833f21f227950
|
e80094d85041869d8900c511d73236fd1100aa5c
|
/WebService/AuthenticationHandler.py
|
44c569363f5abf21afa0a174d2fc90ce559660ee
|
[] |
no_license
|
aghilesDev/Auth-Server
|
35585973332775ac5caa3de7dc9c843181f89751
|
24de73b5a0e5a7467fa8f51feca392d6c9cb506d
|
refs/heads/master
| 2020-12-01T00:10:06.538050
| 2019-12-27T21:08:52
| 2019-12-27T21:08:52
| 230,516,477
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,735
|
py
|
from .Validator import SignatureValidator#,SignatureProvider
from .TokenGenerator import TokenGenerator
import sys
#sys.path.insert(0,'../DataLayer')
from DataLayer.DBRepository import UserRepository
from DataLayer.BCKRepository import AuthenticationRepository as AuthRepo
# Class that handles the different steps of the authentication procedure
class AuthenticationHandler:
def __init__(self,userRepo=UserRepository,signatureValidator=SignatureValidator,tokenGenerator=TokenGenerator,authRepo=AuthRepo):
self.userRepo=userRepo()
self.signatureValidator=signatureValidator()
self.tokenGen=tokenGenerator()
self.authRepo=authRepo()
def generateChallenge(self,username):
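        # Issue a challenge token bound to the user's id; the client must sign it to prove key ownership.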
user=self.userRepo.readUser(username)
if(user is None):
#error
            print('error')
return None
token=self.tokenGen.generateChallengeToken(user.id)
return token
def AuthentificateToken(self,token,signature):
if(self.tokenGen.verifyChallengeToken(token) is False):
            return None # invalid token
data=self.tokenGen.decryptToken(token)
userid=data.get('id')
user=self.userRepo.readUserById(userid)
if(user is None):
            return None # user does not exist
publicKey=self.authRepo.getPublicKeyUser(user.Contract)
        if self.signatureValidator.validate(publicKey=publicKey, token=token, signature=signature): # validate against the token's string representation
            return self.tokenGen.generateAuthentificationToken(user.id) # the user is authenticated; issue an authentication token
        return None # signature does not match
authHandler=AuthenticationHandler()
|
[
"aghiles.goumeziane@gmail.com"
] |
aghiles.goumeziane@gmail.com
|
d275d957a097e664c600ccebdc43269bc3a1b768
|
769d4ba08db6ad3707d8435b2d6ec3ca7fbf4a5d
|
/blender/arm/material/make_mesh.py
|
283a58445013255cf0ef9bfd94e64b3cdf787d93
|
[
"Zlib",
"GPL-2.0-only"
] |
permissive
|
Leonix/armory
|
9f2eee057f2fa66efc306e9e832806aa6ea124ba
|
3eaf2dc569d7caf4a7a7c928575d45cbb9aa3c14
|
refs/heads/master
| 2020-04-15T11:45:55.385644
| 2019-01-07T14:41:06
| 2019-01-07T14:41:06
| 164,643,287
| 0
| 0
|
Zlib
| 2019-01-08T12:22:26
| 2019-01-08T12:22:25
| null |
UTF-8
|
Python
| false
| false
| 27,501
|
py
|
import bpy
import arm.assets as assets
import arm.material.mat_state as mat_state
import arm.material.mat_utils as mat_utils
import arm.material.cycles as cycles
import arm.material.make_tess as make_tess
import arm.material.make_particle as make_particle
import arm.material.make_cluster as make_cluster
import arm.material.make_finalize as make_finalize
import arm.material.make_attrib as make_attrib
import arm.utils
is_displacement = False
write_material_attribs = None
write_material_attribs_post = None
write_vertex_attribs = None
def make(context_id):
rpdat = arm.utils.get_rp()
rid = rpdat.rp_renderer
con = { 'name': context_id, 'depth_write': True, 'compare_mode': 'less', 'cull_mode': 'clockwise' }
# Blend context
mat = mat_state.material
blend = mat.arm_blending
particle = mat_state.material.arm_particle_flag
dprepass = rid == 'Forward' and rpdat.rp_depthprepass
if blend:
con['name'] = 'blend'
con['blend_source'] = mat.arm_blending_source
con['blend_destination'] = mat.arm_blending_destination
con['blend_operation'] = mat.arm_blending_operation
con['alpha_blend_source'] = mat.arm_blending_source_alpha
con['alpha_blend_destination'] = mat.arm_blending_destination_alpha
con['alpha_blend_operation'] = mat.arm_blending_operation_alpha
con['depth_write'] = False
con['compare_mode'] = 'less'
elif particle:
pass
elif dprepass: # Depth prepass was performed
con['depth_write'] = False
con['compare_mode'] = 'equal'
con_mesh = mat_state.data.add_context(con)
mat_state.con_mesh = con_mesh
if rid == 'Forward' or blend:
if rpdat.arm_material_model == 'Mobile':
make_forward_mobile(con_mesh)
elif rpdat.arm_material_model == 'Solid':
make_forward_solid(con_mesh)
else:
make_forward(con_mesh)
elif rid == 'Deferred':
make_deferred(con_mesh)
elif rid == 'Raytracer':
make_raytracer(con_mesh)
make_finalize.make(con_mesh)
assets.vs_equal(con_mesh, assets.shader_cons['mesh_vert'])
return con_mesh
def make_base(con_mesh, parse_opacity):
global is_displacement
global write_material_attribs
global write_material_attribs_post
global write_vertex_attribs
vert = con_mesh.make_vert()
frag = con_mesh.make_frag()
geom = None
tesc = None
tese = None
vert.add_uniform('mat3 N', '_normalMatrix')
vert.write_attrib('vec4 spos = vec4(pos.xyz, 1.0);')
vattr_written = False
rpdat = arm.utils.get_rp()
is_displacement = mat_utils.disp_linked(mat_state.output_node)
if is_displacement:
if rpdat.arm_rp_displacement == 'Vertex':
frag.ins = vert.outs
else: # Tessellation
tesc = con_mesh.make_tesc()
tese = con_mesh.make_tese()
tesc.ins = vert.outs
tese.ins = tesc.outs
frag.ins = tese.outs
make_tess.tesc_levels(tesc, rpdat.arm_tess_mesh_inner, rpdat.arm_tess_mesh_outer)
make_tess.interpolate(tese, 'wposition', 3, declare_out=True)
make_tess.interpolate(tese, 'wnormal', 3, declare_out=True, normalize=True)
# No displacement
else:
frag.ins = vert.outs
if write_vertex_attribs != None:
vattr_written = write_vertex_attribs(vert)
frag.add_include('compiled.inc')
written = False
if write_material_attribs != None:
written = write_material_attribs(con_mesh, frag)
if written == False:
frag.write('vec3 basecol;')
frag.write('float roughness;')
frag.write('float metallic;')
frag.write('float occlusion;')
frag.write('float specular;')
if parse_opacity:
frag.write('float opacity;')
cycles.parse(mat_state.nodes, con_mesh, vert, frag, geom, tesc, tese, parse_opacity=parse_opacity)
if write_material_attribs_post != None:
write_material_attribs_post(con_mesh, frag)
if not is_displacement and not vattr_written:
make_attrib.write_vertpos(vert)
if con_mesh.is_elem('tex'):
vert.add_out('vec2 texCoord')
vert.add_uniform('float texUnpack', link='_texUnpack')
if mat_state.material.arm_tilesheet_mat:
if mat_state.material.arm_particle_flag and rpdat.arm_particles == 'GPU':
make_particle.write_tilesheet(vert)
else:
vert.add_uniform('vec2 tilesheetOffset', '_tilesheetOffset')
vert.write_attrib('texCoord = tex * texUnpack + tilesheetOffset;')
else:
vert.write_attrib('texCoord = tex * texUnpack;')
if tese != None:
tese.write_pre = True
make_tess.interpolate(tese, 'texCoord', 2, declare_out=frag.contains('texCoord'))
tese.write_pre = False
if con_mesh.is_elem('tex1'):
vert.add_out('vec2 texCoord1')
vert.add_uniform('float texUnpack', link='_texUnpack')
vert.write_attrib('texCoord1 = tex1 * texUnpack;')
if tese != None:
tese.write_pre = True
make_tess.interpolate(tese, 'texCoord1', 2, declare_out=frag.contains('texCoord1'))
tese.write_pre = False
if con_mesh.is_elem('col'):
vert.add_out('vec3 vcolor')
vert.write_attrib('vcolor = col.rgb;')
if tese != None:
tese.write_pre = True
make_tess.interpolate(tese, 'vcolor', 3, declare_out=frag.contains('vcolor'))
tese.write_pre = False
if con_mesh.is_elem('tang'):
if tese != None:
vert.add_out('vec3 wnormal')
make_attrib.write_norpos(con_mesh, vert)
tese.add_out('mat3 TBN')
tese.write('vec3 wbitangent = normalize(cross(wnormal, wtangent));')
tese.write('TBN = mat3(wtangent, wbitangent, wnormal);')
else:
vert.add_out('mat3 TBN')
make_attrib.write_norpos(con_mesh, vert, declare=True)
vert.write('vec3 tangent = normalize(N * tang.xyz);')
vert.write('vec3 bitangent = normalize(cross(wnormal, tangent));')
vert.write('TBN = mat3(tangent, bitangent, wnormal);')
else:
vert.add_out('vec3 wnormal')
make_attrib.write_norpos(con_mesh, vert)
frag.write_attrib('vec3 n = normalize(wnormal);')
if is_displacement:
if rpdat.arm_rp_displacement == 'Vertex':
sh = vert
else:
sh = tese
sh.add_uniform('mat4 VP', '_viewProjectionMatrix')
sh.write('wposition += wnormal * disp * 0.1;')
sh.write('gl_Position = VP * vec4(wposition, 1.0);')
def make_deferred(con_mesh):
wrd = bpy.data.worlds['Arm']
rpdat = arm.utils.get_rp()
arm_discard = mat_state.material.arm_discard
parse_opacity = arm_discard
make_base(con_mesh, parse_opacity=parse_opacity)
frag = con_mesh.frag
vert = con_mesh.vert
tese = con_mesh.tese
if arm_discard:
opac = mat_state.material.arm_discard_opacity
frag.write('if (opacity < {0}) discard;'.format(opac))
gapi = arm.utils.get_gapi()
if '_gbuffer2' in wrd.world_defs:
frag.add_out('vec4[3] fragColor')
if '_Veloc' in wrd.world_defs:
if tese == None:
vert.add_uniform('mat4 prevWVP', link='_prevWorldViewProjectionMatrix')
vert.add_out('vec4 wvpposition')
vert.add_out('vec4 prevwvpposition')
vert.write('wvpposition = gl_Position;')
if is_displacement:
vert.add_uniform('mat4 invW', link='_inverseWorldMatrix')
vert.write('prevwvpposition = prevWVP * (invW * vec4(wposition, 1.0));')
else:
vert.write('prevwvpposition = prevWVP * spos;')
else:
tese.add_out('vec4 wvpposition')
tese.add_out('vec4 prevwvpposition')
tese.write('wvpposition = gl_Position;')
if is_displacement:
tese.add_uniform('mat4 invW', link='_inverseWorldMatrix')
tese.add_uniform('mat4 prevWVP', '_prevWorldViewProjectionMatrix')
tese.write('prevwvpposition = prevWVP * (invW * vec4(wposition, 1.0));')
else:
vert.add_uniform('mat4 prevW', link='_prevWorldMatrix')
vert.add_out('vec3 prevwposition')
vert.write('prevwposition = vec4(prevW * spos).xyz;')
tese.add_uniform('mat4 prevVP', '_prevViewProjectionMatrix')
make_tess.interpolate(tese, 'prevwposition', 3)
tese.write('prevwvpposition = prevVP * vec4(prevwposition, 1.0);')
elif gapi.startswith('direct3d'):
vert.add_out('vec4 wvpposition')
vert.write('wvpposition = gl_Position;')
frag.add_out('vec4[2] fragColor')
else:
frag.add_out('vec4[2] fragColor')
# Pack gbuffer
frag.add_include('std/gbuffer.glsl')
if mat_state.material.arm_two_sided:
frag.write('if (!gl_FrontFacing) n *= -1;') # Flip normal when drawing back-face
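    # Octahedron-encode the world normal into two components so it fits in the gbuffer.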
frag.write('n /= (abs(n.x) + abs(n.y) + abs(n.z));')
frag.write('n.xy = n.z >= 0.0 ? n.xy : octahedronWrap(n.xy);')
frag.write('fragColor[0] = vec4(n.xy, packFloat(metallic, roughness), 1.0);')
frag.write('fragColor[1] = vec4(basecol.rgb, packFloat2(occlusion, specular));')
if '_gbuffer2' in wrd.world_defs:
if '_Veloc' in wrd.world_defs:
frag.write('vec2 posa = (wvpposition.xy / wvpposition.w) * 0.5 + 0.5;')
frag.write('vec2 posb = (prevwvpposition.xy / prevwvpposition.w) * 0.5 + 0.5;')
frag.write('fragColor[2].rg = vec2(posa - posb);')
if '_SSS' in wrd.world_defs or '_Hair' in wrd.world_defs:
frag.add_uniform('int materialID')
frag.write('fragColor[2].a = materialID;')
return con_mesh
def make_raytracer(con_mesh):
wrd = bpy.data.worlds['Arm']
vert = con_mesh.make_vert()
frag = con_mesh.make_frag()
vert.add_out('vec3 n')
vert.write('n = nor;')
vert.write('gl_Position = vec4(pos.xyz, 1.0);')
def make_forward_mobile(con_mesh):
wrd = bpy.data.worlds['Arm']
vert = con_mesh.make_vert()
frag = con_mesh.make_frag()
geom = None
tesc = None
tese = None
vert.add_uniform('mat3 N', '_normalMatrix')
vert.write_attrib('vec4 spos = vec4(pos.xyz, 1.0);')
frag.ins = vert.outs
make_attrib.write_vertpos(vert)
frag.add_include('compiled.inc')
frag.write('vec3 basecol;')
frag.write('float roughness;')
frag.write('float metallic;')
frag.write('float occlusion;')
frag.write('float specular;')
arm_discard = mat_state.material.arm_discard
blend = mat_state.material.arm_blending
is_transluc = mat_utils.is_transluc(mat_state.material)
parse_opacity = (blend and is_transluc) or arm_discard
if parse_opacity:
frag.write('float opacity;')
cycles.parse(mat_state.nodes, con_mesh, vert, frag, geom, tesc, tese, parse_opacity=parse_opacity, parse_displacement=False)
if arm_discard:
opac = mat_state.material.arm_discard_opacity
frag.write('if (opacity < {0}) discard;'.format(opac))
if con_mesh.is_elem('tex'):
vert.add_out('vec2 texCoord')
vert.add_uniform('float texUnpack', link='_texUnpack')
if mat_state.material.arm_tilesheet_mat:
vert.add_uniform('vec2 tilesheetOffset', '_tilesheetOffset')
vert.write('texCoord = tex * texUnpack + tilesheetOffset;')
else:
vert.write('texCoord = tex * texUnpack;')
if con_mesh.is_elem('col'):
vert.add_out('vec3 vcolor')
vert.write('vcolor = col.rgb;')
if con_mesh.is_elem('tang'):
vert.add_out('mat3 TBN')
make_attrib.write_norpos(con_mesh, vert, declare=True)
        vert.write('vec3 tangent = normalize(N * tang.xyz);')
vert.write('vec3 bitangent = normalize(cross(wnormal, tangent));')
vert.write('TBN = mat3(tangent, bitangent, wnormal);')
else:
vert.add_out('vec3 wnormal')
make_attrib.write_norpos(con_mesh, vert)
frag.write_attrib('vec3 n = normalize(wnormal);')
frag.add_include('std/math.glsl')
frag.add_include('std/brdf.glsl')
frag.add_out('vec4 fragColor')
blend = mat_state.material.arm_blending
if blend:
if parse_opacity:
frag.write('fragColor = vec4(basecol, opacity);')
else:
frag.write('fragColor = vec4(basecol, 1.0);')
return
is_shadows = '_ShadowMap' in wrd.world_defs
frag.write('vec3 direct = vec3(0.0);')
if '_Sun' in wrd.world_defs:
frag.add_uniform('vec3 sunCol', '_sunColor')
frag.add_uniform('vec3 sunDir', '_sunDirection')
frag.write('float svisibility = 1.0;')
frag.write('float sdotNL = max(dot(n, sunDir), 0.0);')
if is_shadows:
vert.add_out('vec4 lightPosition')
vert.add_uniform('mat4 LWVP', '_biasLightWorldViewProjectionMatrix')
vert.write('lightPosition = LWVP * spos;')
frag.add_uniform('sampler2DShadow shadowMap')
frag.add_uniform('float shadowsBias', '_sunShadowsBias')
if '_CSM' in wrd.world_defs:
frag.add_include('std/shadows.glsl')
frag.add_uniform('vec4 casData[shadowmapCascades * 4 + 4]', '_cascadeData', included=True)
frag.add_uniform('vec3 eye', '_cameraPosition')
frag.write('svisibility = shadowTestCascade(shadowMap, eye, wposition + n * shadowsBias * 10, shadowsBias, shadowmapSize * vec2(shadowmapCascades, 1.0));')
else:
frag.write('if (lightPosition.w > 0.0) {')
frag.write(' vec3 lPos = lightPosition.xyz / lightPosition.w;')
frag.write(' svisibility = texture(shadowMap, vec3(lPos.xy, lPos.z - shadowsBias)).r;')
frag.write('}')
frag.write('direct += basecol * sdotNL * sunCol * svisibility;')
if '_SinglePoint' in wrd.world_defs:
frag.add_uniform('vec3 pointPos', '_pointPosition')
frag.add_uniform('vec3 pointCol', '_pointColor')
frag.write('float visibility = 1.0;')
frag.write('vec3 ld = pointPos - wposition;')
frag.write('vec3 l = normalize(ld);')
frag.write('float dotNL = max(dot(n, l), 0.0);')
if '_Spot' in wrd.world_defs:
frag.add_uniform('vec3 spotDir', link='_spotDirection')
frag.add_uniform('vec2 spotData', link='_spotData')
if is_shadows:
vert.add_out('vec4 spotPosition')
vert.add_uniform('mat4 LWVPSpot0', '_biasLightWorldViewProjectionMatrixSpot0')
vert.write('spotPosition = LWVPSpot0 * spos;')
frag.add_uniform('float pointBias', link='_pointShadowsBias')
frag.add_uniform('sampler2DShadow shadowMapSpot[1]')
frag.write('if (spotPosition.w > 0.0) {')
frag.write(' vec3 lPos = spotPosition.xyz / spotPosition.w;')
                frag.write('    visibility = texture(shadowMapSpot[0], vec3(lPos.xy, lPos.z - pointBias)).r;')
frag.write('}')
elif is_shadows:
frag.add_include('std/shadows.glsl')
frag.add_uniform('vec2 lightProj', link='_lightPlaneProj')
frag.add_uniform('samplerCubeShadow shadowMapPoint[1]')
frag.add_uniform('float pointBias', link='_pointShadowsBias')
frag.write('const float s = shadowmapCubePcfSize;') # TODO: incorrect...
frag.write('float compare = lpToDepth(ld - n * pointBias * 80, lightProj);')
frag.write('visibility = texture(shadowMapPoint[0], vec4(-l + n * pointBias * 80, compare)).r;')
frag.write('direct += basecol * dotNL * pointCol * attenuate(distance(wposition, pointPos)) * visibility;')
if '_Clusters' in wrd.world_defs:
frag.add_include('std/light_mobile.glsl')
frag.write('vec3 albedo = basecol;')
frag.write('vec3 f0 = surfaceF0(basecol, metallic);')
make_cluster.write(vert, frag)
if '_Irr' in wrd.world_defs:
frag.add_include('std/shirr.glsl')
frag.add_uniform('vec4 shirr[7]', link='_envmapIrradiance', included=True)
env_str = 'shIrradiance(n)'
else:
env_str = '0.5'
frag.add_uniform('float envmapStrength', link='_envmapStrength')
frag.write('fragColor = vec4(direct + basecol * {0} * envmapStrength, 1.0);'.format(env_str))
if '_LDR' in wrd.world_defs:
frag.write('fragColor.rgb = pow(fragColor.rgb, vec3(1.0 / 2.2));')
def make_forward_solid(con_mesh):
wrd = bpy.data.worlds['Arm']
vert = con_mesh.make_vert()
frag = con_mesh.make_frag()
geom = None
tesc = None
tese = None
for e in con_mesh.data['vertex_elements']:
if e['name'] == 'nor':
con_mesh.data['vertex_elements'].remove(e)
break
vert.write_attrib('vec4 spos = vec4(pos.xyz, 1.0);')
frag.ins = vert.outs
make_attrib.write_vertpos(vert)
frag.add_include('compiled.inc')
frag.write('vec3 basecol;')
frag.write('float roughness;')
frag.write('float metallic;')
frag.write('float occlusion;')
frag.write('float specular;')
arm_discard = mat_state.material.arm_discard
blend = mat_state.material.arm_blending
is_transluc = mat_utils.is_transluc(mat_state.material)
parse_opacity = (blend and is_transluc) or arm_discard
if parse_opacity:
frag.write('float opacity;')
cycles.parse(mat_state.nodes, con_mesh, vert, frag, geom, tesc, tese, parse_opacity=parse_opacity, parse_displacement=False, basecol_only=True)
if arm_discard:
opac = mat_state.material.arm_discard_opacity
frag.write('if (opacity < {0}) discard;'.format(opac))
if con_mesh.is_elem('tex'):
vert.add_out('vec2 texCoord')
vert.add_uniform('float texUnpack', link='_texUnpack')
if mat_state.material.arm_tilesheet_mat:
vert.add_uniform('vec2 tilesheetOffset', '_tilesheetOffset')
vert.write('texCoord = tex * texUnpack + tilesheetOffset;')
else:
vert.write('texCoord = tex * texUnpack;')
if con_mesh.is_elem('col'):
vert.add_out('vec3 vcolor')
vert.write('vcolor = col.rgb;')
make_attrib.write_norpos(con_mesh, vert, write_nor=False)
frag.add_out('vec4 fragColor')
if blend and parse_opacity:
frag.write('fragColor = vec4(basecol, opacity);')
else:
frag.write('fragColor = vec4(basecol, 1.0);')
if '_LDR' in wrd.world_defs:
frag.write('fragColor.rgb = pow(fragColor.rgb, vec3(1.0 / 2.2));')
def make_forward(con_mesh):
wrd = bpy.data.worlds['Arm']
blend = mat_state.material.arm_blending
parse_opacity = blend and mat_utils.is_transluc(mat_state.material)
make_forward_base(con_mesh, parse_opacity=parse_opacity)
frag = con_mesh.frag
if not blend:
frag.add_out('vec4 fragColor')
frag.write('fragColor = vec4(direct + indirect, 1.0);')
if '_LDR' in wrd.world_defs:
frag.add_include('std/tonemap.glsl')
frag.write('fragColor.rgb = tonemapFilmic(fragColor.rgb);')
# frag.write('fragColor.rgb = pow(fragColor.rgb, vec3(1.0 / 2.2));')
# Particle opacity
if mat_state.material.arm_particle_flag and arm.utils.get_rp().arm_particles == 'GPU' and mat_state.material.arm_particle_fade:
frag.write('fragColor.rgb *= p_fade;')
def make_forward_base(con_mesh, parse_opacity=False):
global is_displacement
wrd = bpy.data.worlds['Arm']
arm_discard = mat_state.material.arm_discard
make_base(con_mesh, parse_opacity=(parse_opacity or arm_discard))
vert = con_mesh.vert
frag = con_mesh.frag
tese = con_mesh.tese
if arm_discard:
opac = mat_state.material.arm_discard_opacity
frag.write('if (opacity < {0}) discard;'.format(opac))
blend = mat_state.material.arm_blending
if blend:
frag.add_out('vec4 fragColor')
if parse_opacity:
frag.write('fragColor = vec4(basecol, opacity);')
else:
# frag.write('fragColor = vec4(basecol * lightCol * visibility, 1.0);')
frag.write('fragColor = vec4(basecol, 1.0);')
# TODO: Fade out fragments near depth buffer here
return
frag.write_init("""
vec3 vVec = normalize(eyeDir);
float dotNV = max(dot(n, vVec), 0.0);
""")
sh = tese if tese != None else vert
sh.add_out('vec3 eyeDir')
sh.add_uniform('vec3 eye', '_cameraPosition')
sh.write('eyeDir = eye - wposition;')
frag.add_include('std/light.glsl')
is_shadows = '_ShadowMap' in wrd.world_defs
frag.write('vec3 albedo = surfaceAlbedo(basecol, metallic);')
frag.write('vec3 f0 = surfaceF0(basecol, metallic);')
frag.write('vec3 direct = vec3(0.0);')
frag.add_uniform('bool receiveShadow')
if '_Sun' in wrd.world_defs:
frag.add_uniform('vec3 sunCol', '_sunColor')
frag.add_uniform('vec3 sunDir', '_sunDirection')
frag.write('float svisibility = 1.0;')
frag.write('vec3 sh = normalize(vVec + sunDir);')
frag.write('float sdotNL = dot(n, sunDir);')
frag.write('float sdotNH = dot(n, sh);')
frag.write('float sdotVH = dot(vVec, sh);')
if is_shadows:
frag.add_uniform('sampler2DShadow shadowMap')
frag.add_uniform('float shadowsBias', '_sunShadowsBias')
frag.write('if (receiveShadow) {')
if '_CSM' in wrd.world_defs:
frag.add_include('std/shadows.glsl')
frag.add_uniform('vec4 casData[shadowmapCascades * 4 + 4]', '_cascadeData', included=True)
frag.add_uniform('vec3 eye', '_cameraPosition')
frag.write('svisibility = shadowTestCascade(shadowMap, eye, wposition + n * shadowsBias * 10, shadowsBias, shadowmapSize * vec2(shadowmapCascades, 1.0));')
else:
if tese != None:
tese.add_out('vec4 lightPosition')
tese.add_uniform('mat4 LVP', '_biasLightViewProjectionMatrix')
tese.write('lightPosition = LVP * vec4(wposition, 1.0);')
else:
if is_displacement:
vert.add_out('vec4 lightPosition')
vert.add_uniform('mat4 LVP', '_biasLightViewProjectionMatrix')
vert.write('lightPosition = LVP * vec4(wposition, 1.0);')
else:
vert.add_out('vec4 lightPosition')
vert.add_uniform('mat4 LWVP', '_biasLightWorldViewProjectionMatrix')
vert.write('lightPosition = LWVP * spos;')
frag.write('vec3 lPos = lightPosition.xyz / lightPosition.w;')
frag.write('const vec2 smSize = shadowmapSize;')
frag.write('svisibility = PCF(shadowMap, lPos.xy, lPos.z - shadowsBias, smSize);')
frag.write('}') # receiveShadow
# is_shadows
frag.write('direct += (lambertDiffuseBRDF(albedo, sdotNL) + specularBRDF(f0, roughness, sdotNL, sdotNH, dotNV, sdotVH) * specular) * sunCol * svisibility;')
# sun
if '_SinglePoint' in wrd.world_defs:
frag.add_uniform('vec3 pointPos', link='_pointPosition')
frag.add_uniform('vec3 pointCol', link='_pointColor')
if is_shadows:
frag.add_uniform('vec2 lightProj', link='_lightPlaneProj', included=True)
frag.add_uniform('samplerCubeShadow shadowMapPoint[1]', included=True)
frag.add_uniform('float pointBias', link='_pointShadowsBias')
if '_Spot' in wrd.world_defs:
frag.add_uniform('vec3 spotDir', link='_spotDirection')
frag.add_uniform('vec2 spotData', link='_spotData')
if is_shadows:
frag.add_uniform('mat4 LWVPSpot0', '_biasLightWorldViewProjectionMatrixSpot0', included=True)
frag.add_uniform('sampler2DShadow shadowMapSpot[1]', included=True)
frag.write('direct += sampleLight(')
frag.write(' wposition, n, vVec, dotNV, pointPos, pointCol, albedo, roughness, specular, f0')
if is_shadows:
frag.write(' , 0, pointBias')
if '_Spot' in wrd.world_defs:
frag.write(' , true, spotData.x, spotData.y, spotDir')
frag.write(');')
if '_Clusters' in wrd.world_defs:
make_cluster.write(vert, frag)
if '_Brdf' in wrd.world_defs:
frag.add_uniform('sampler2D senvmapBrdf', link='_envmapBrdf')
frag.write('vec2 envBRDF = texture(senvmapBrdf, vec2(roughness, 1.0 - dotNV)).xy;')
if '_Irr' in wrd.world_defs:
frag.add_include('std/shirr.glsl')
frag.add_uniform('vec4 shirr[7]', link='_envmapIrradiance', included=True)
frag.write('vec3 indirect = shIrradiance(n);')
if '_EnvTex' in wrd.world_defs:
frag.write('indirect /= PI;')
frag.write('indirect *= albedo;')
if '_Rad' in wrd.world_defs:
frag.add_uniform('sampler2D senvmapRadiance', link='_envmapRadiance')
frag.add_uniform('int envmapNumMipmaps', link='_envmapNumMipmaps')
frag.write('vec3 reflectionWorld = reflect(-vVec, n);')
frag.write('float lod = getMipFromRoughness(roughness, envmapNumMipmaps);')
frag.write('vec3 prefilteredColor = textureLod(senvmapRadiance, envMapEquirect(reflectionWorld), lod).rgb;')
if '_EnvLDR' in wrd.world_defs:
frag.write('prefilteredColor = pow(prefilteredColor, vec3(2.2));')
frag.write('indirect += prefilteredColor * (f0 * envBRDF.x + envBRDF.y) * 1.5;')
elif '_EnvCol' in wrd.world_defs:
frag.add_uniform('vec3 backgroundCol', link='_backgroundCol')
frag.write('indirect += backgroundCol * f0;')
else:
frag.write('vec3 indirect = albedo;')
frag.write('indirect *= occlusion;')
frag.add_uniform('float envmapStrength', link='_envmapStrength')
frag.write('indirect *= envmapStrength;')
if '_VoxelGI' in wrd.world_defs or '_VoxelAO' in wrd.world_defs:
frag.add_include('std/conetrace.glsl')
frag.add_uniform('sampler3D voxels')
if '_VoxelGICam' in wrd.world_defs:
frag.add_uniform('vec3 eyeSnap', link='_cameraPositionSnap')
frag.write('vec3 voxpos = (wposition - eyeSnap) / voxelgiHalfExtents;')
else:
frag.write('vec3 voxpos = wposition / voxelgiHalfExtents;')
if '_VoxelAO' in wrd.world_defs:
frag.write('indirect *= vec3(1.0 - traceAO(voxpos, n, voxels));')
else:
frag.write('vec4 indirectDiffuse = traceDiffuse(voxpos, n, voxels);')
frag.write('indirect = indirect * voxelgiEnv + vec3(indirectDiffuse.rgb * voxelgiDiff * basecol);')
frag.write('if (specular > 0.0) {')
frag.write('vec3 indirectSpecular = traceSpecular(voxels, voxpos, n, vVec, roughness);')
frag.write('indirectSpecular *= f0 * envBRDF.x + envBRDF.y;')
frag.write('indirect += indirectSpecular * voxelgiSpec * specular;')
frag.write('}')
|
[
"lubos.lenco@gmail.com"
] |
lubos.lenco@gmail.com
|
18bc49a4a5fa8fcc62814b4b7994eab58a0a4286
|
17cadafa88ac5864a19b99e864953bbd9663cc02
|
/elementary/12.py
|
938030158ecfdc2604ba3fcf3c3d849d900743a0
|
[] |
no_license
|
Ascarik/Checkio
|
4b4a23d2bb2d3a62f36958d0717942cd9b42f7f6
|
4bafc893b1c1db88cdba9afed209b10c5453fe6f
|
refs/heads/master
| 2023-06-24T02:53:04.240687
| 2023-06-10T13:36:36
| 2023-06-10T13:36:36
| 230,712,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
def first_word(text: str) -> str:
return text.split(" ")[0]
if __name__ == '__main__':
print("Example:")
print(first_word("Hello world"))
# These "asserts" are used for self-checking and not for an auto-testing
assert first_word("Hello world") == "Hello"
assert first_word("a word") == "a"
assert first_word("hi") == "hi"
print("Coding complete? Click 'Check' to earn cool rewards!")
|
[
"ascar@list.ru"
] |
ascar@list.ru
|
d2adb9a0f210f36cd6ff27fe3c481dcff845dab7
|
7c4a0a8f3b9a7e026647cfc48b067bb6cead0918
|
/labs/tests/tests_ec.py
|
a4e4fd953dac5784fde887b5e47bcb9f01a31b2e
|
[] |
no_license
|
endlessgate/bitLabs
|
8651acb5cc978456f0765736386cf53b39f8492f
|
50788c705614f8084bb778d4e1e23fbf595389e3
|
refs/heads/master
| 2021-07-05T05:20:43.076291
| 2019-09-22T05:13:11
| 2019-09-22T05:13:11
| 196,538,922
| 0
| 0
| null | 2020-10-27T21:51:02
| 2019-07-12T08:19:20
|
Python
|
UTF-8
|
Python
| false
| false
| 458
|
py
|
import hashlib
from labs.utils.keys import PrivateKey
from labs.utils.keys.ecdsa import (
sign,
recover,
verifies
)
def tests_ec():
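    # Round-trip check: sign a message digest, recover the signer, then verify the signature.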
keys = PrivateKey.make()
hashes = hashlib.sha3_256('testMessages'.encode()).digest()
sig_vrs = sign(hashes, keys.private_bytes)
recovery = recover(hashes, sig_vrs)
sig_rs = sig_vrs[:2]
if verifies(hashes, sig_rs, recovery):
print('verified')
else:
raise ValueError
|
[
"endlessgate1@gmail.com"
] |
endlessgate1@gmail.com
|
11fec0b6e83c48f5561ee16a40d78f2b90ed7d74
|
ae0a75d1a4a879961dc1391f0d0e9451e54dc15c
|
/ui/AccDialog.py
|
c58d5d40d6ab9e2005e5b3060fbfccf19f8fce55
|
[] |
no_license
|
QVllasa/PasswordManager
|
986313ceb05c8222efa47883bce2773baec495d3
|
aa775bae439156e7fb51602191f2d28168b2de29
|
refs/heads/master
| 2021-06-27T09:26:17.788159
| 2020-11-05T22:46:39
| 2020-11-05T22:46:39
| 180,325,358
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/AccDialog.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AccDialog(object):
def setupUi(self, AccDialog):
AccDialog.setObjectName("AccDialog")
AccDialog.resize(461, 300)
self.buttonBox = QtWidgets.QDialogButtonBox(AccDialog)
self.buttonBox.setGeometry(QtCore.QRect(360, 10, 81, 241))
self.buttonBox.setOrientation(QtCore.Qt.Vertical)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayoutWidget = QtWidgets.QWidget(AccDialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 331, 271))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.comboBox = QtWidgets.QComboBox(self.verticalLayoutWidget)
self.comboBox.setObjectName("comboBox")
self.verticalLayout.addWidget(self.comboBox)
self.tableWidget = QtWidgets.QTableWidget(self.verticalLayoutWidget)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(1)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
self.tableWidget.horizontalHeader().setVisible(False)
self.tableWidget.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.verticalLayout.addWidget(self.tableWidget)
self.retranslateUi(AccDialog)
self.buttonBox.accepted.connect(AccDialog.accept)
self.buttonBox.rejected.connect(AccDialog.reject)
QtCore.QMetaObject.connectSlotsByName(AccDialog)
def retranslateUi(self, AccDialog):
_translate = QtCore.QCoreApplication.translate
AccDialog.setWindowTitle(_translate("AccDialog", "Show Accounts"))
item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("AccDialog", "New Column"))
|
[
"qendrimvllasa@hotmail.de"
] |
qendrimvllasa@hotmail.de
|
d48b252d8c920ec2d420cb460653394e4e7176f0
|
8379fed4da71a1277983dafea7b70d432c2a66a3
|
/draw.py
|
c4aa4ab1ee5bcf27eb7ea7b0ceb27c3d4d374d48
|
[] |
no_license
|
alievilya/people_counter_detector
|
e3d1c1db113e9acaf6f6e4d222b2bd7c30427558
|
cd842e51675eb22081132163e20a03476da8d87f
|
refs/heads/main
| 2023-01-19T09:37:08.170937
| 2020-11-22T11:59:23
| 2020-11-22T11:59:23
| 314,923,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,660
|
py
|
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
# initialize the list of points for the rectangle bbox,
# the temporary endpoint of the rectangle being drawn,
# the list of all bounding boxes of selected ROIs,
# and a boolean indicating whether mouse drawing
# is in progress
rect_endpoint_tmp = []
rect_bbox = []
bbox_list_rois = []
drawing = False
def select_object(img):
"""
    Interactively select rectangular ROIs and store the list of bboxes.
Parameters
----------
img :
image 3-dim.
Returns
-------
bbox_list_rois : list of list of int
List of bboxes of rectangle rois.
"""
# mouse callback function
def draw_rect_roi(event, x, y, flags, param):
# grab references to the global variables
global rect_bbox, rect_endpoint_tmp, drawing
        # if the left mouse button was clicked, record the starting
        # (x, y) coordinates, indicate that drawing is in progress,
        # and reset rect_endpoint_tmp to an empty list
if event == cv2.EVENT_LBUTTONDOWN:
rect_endpoint_tmp = []
rect_bbox = [(x, y)]
drawing = True
# check to see if the left mouse button was released
elif event == cv2.EVENT_LBUTTONUP:
# record the ending (x, y) coordinates and indicate that
# drawing operation is finished
rect_bbox.append((x, y))
drawing = False
# draw a rectangle around the region of interest
p_1, p_2 = rect_bbox
cv2.rectangle(img, p_1, p_2, color=(0, 255, 0),thickness=1)
cv2.imshow('image', img)
# for bbox find upper left and bottom right points
p_1x, p_1y = p_1
p_2x, p_2y = p_2
lx = min(p_1x, p_2x)
ty = min(p_1y, p_2y)
rx = max(p_1x, p_2x)
by = max(p_1y, p_2y)
# add bbox to list if both points are different
if (lx, ty) != (rx, by):
bbox = [lx, ty, rx, by]
bbox_list_rois.append(bbox)
# if mouse is drawing set tmp rectangle endpoint to (x,y)
elif event == cv2.EVENT_MOUSEMOVE and drawing:
rect_endpoint_tmp = [(x, y)]
# clone image img and setup the mouse callback function
img_copy = img.copy()
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_rect_roi)
# keep looping until the 'c' key is pressed
while True:
# display the image and wait for a keypress
if not drawing:
cv2.imshow('image', img)
elif drawing and rect_endpoint_tmp:
rect_cpy = img.copy()
start_point = rect_bbox[0]
end_point_tmp = rect_endpoint_tmp[0]
cv2.rectangle(rect_cpy, start_point, end_point_tmp,(0,255,0),1)
cv2.imshow('image', rect_cpy)
key = cv2.waitKey(1) & 0xFF
# if the 'c' key is pressed, break from the loop
if key == ord('c'):
break
# close all open windows
cv2.destroyAllWindows()
return bbox_list_rois
def put_text_pil(img: np.array, txt: str):
im = Image.fromarray(img)
font_size = 15
font = ImageFont.truetype('CharisSILR.ttf', size=font_size)
draw = ImageDraw.Draw(im)
    # measure the size of the rendered text block
w, h = draw.textsize(txt, font=font)
y_pos = 30
im = Image.fromarray(img)
draw = ImageDraw.Draw(im)
    # now the text can be positioned
draw.text((int((img.shape[1] - 150)), 0), txt, fill='rgb(255, 255, 255)', font=font)
img = np.asarray(im)
return img
def draw_image(img, counting_appearance, show_img=False):
color = (0, 0, 0)
scale = max(img.shape[0:2]) / 416
line_width = int(2 * scale)
# text = f'{detection[5]:.2f} id: {person_id} '
    text_counting = f'entered: {counting_appearance["in"]}, exited: {counting_appearance["out"]}'
cv2.rectangle(img, (img.shape[1] - 150, 0), (img.shape[1], 20), color, cv2.FILLED)
img = put_text_pil(img, text_counting)
# font = cv2.FONT_HERSHEY_DUPLEX
# font_scale = max(0.3 * scale, 0.3)
# thickness = max(int(1 * scale), 1)
# (text_width, text_height) = cv2.getTextSize(text_counting, font, fontScale=font_scale, thickness=thickness)[0]
# cv2.putText(img, text, (x1, y1), font, font_scale, (255, 255, 255), thickness, cv2.LINE_AA)
# cv2.putText(img, text_counting, (x2, y2), font, font_scale, (124, 255, 255), thickness, cv2.LINE_AA)
if show_img:
cv2.imshow('detected', img)
cv2.waitKey(1)
return img
|
[
"aliev.ilia@yandex.ru"
] |
aliev.ilia@yandex.ru
|
3b12ed30d55a49b530994d01eebfb63d3b5c1753
|
2c896a677f0b4a06b1395f51e458ce1c0db23928
|
/List/Bootcamp/Pop Method.py
|
6fcdb77a78af1e085b3f24e41a8d9ce2884f5591
|
[] |
no_license
|
alakd1985/Python-with-Selenium
|
4a308762d4b7c4fd7d5145c474db7a0e8d926101
|
fdd1d4fcdfbe57f3505f9f4d204ac5ee1e4c7dcd
|
refs/heads/master
| 2022-12-18T20:16:44.296221
| 2020-09-20T21:45:18
| 2020-09-20T21:45:18
| 297,119,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# pop() removes and returns the last element of the list
l = [10, 20, 30, 40, 10, 20, 60, 50]
print(l.pop())
print(l)
# pop(i) removes and returns the element at index i; calling pop on an empty list raises IndexError
print(l.pop(2))
print(l)
# clear() removes all elements from the list (it returns None)
print(l.clear())
print(l)
|
[
"alakd1985@gmail.com"
] |
alakd1985@gmail.com
|
bc6ea8c4c07ba0e42327bfce831ff72fd1ee03d2
|
a0f31a32590dde50ee78ddea8f74449b1f4f08c6
|
/truck_detect.py
|
ee69506d8a78d2b448a3096971863c4069612439
|
[] |
no_license
|
fung7890/AWSPortPrep
|
2e79b1480c69d7b557bd1f47374eca40b3e23fef
|
45a49ebc68edefa9900c3644d56c0545e09537db
|
refs/heads/master
| 2023-02-12T23:43:04.817847
| 2021-01-15T20:12:53
| 2021-01-15T20:12:53
| 329,722,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,049
|
py
|
import panoramasdk
import cv2
import numpy as np
import time
import boto3
# Global Variables
HEIGHT = 512
WIDTH = 512
class TruckDetection(panoramasdk.base):
def interface(self):
return {
"parameters":
(
("float", "threshold", "Detection threshold", 0.50),
("model", "truck_test_model",
"Model for testing truck detection, weights are currently for multiple COCO objects", "model"),
("int", "batch_size", "Model batch size", 1),
("float", "truck_index",
"truck index based on pretrained model's dataset, COCO", 8),
),
"inputs":
(
("media[]", "video_in", "Camera input stream"),
),
"outputs":
(
("media[video_in]", "video_out", "Camera output stream"),
)
}
"""init() function is called once by the Lambda runtime. It gives Lambda a chance to perform
any necessary initialization before entering the main loop."""
def init(self, parameters, inputs, outputs):
try:
# Frame Number Initialization
self.frame_num = 0
# # Index for truck from parameters
self.index = parameters.truck_index
# Set threshold for model from parameters
self.threshold = parameters.threshold
# set number of trucks
self.number_trucks = 0
# Load model from the specified directory.
print("loading the model...")
self.model = panoramasdk.model()
self.model.open(parameters.truck_test_model, 1)
print("model loaded")
# Create input and output arrays.
class_info = self.model.get_output(0)
prob_info = self.model.get_output(1)
rect_info = self.model.get_output(2)
self.class_array = np.empty(
class_info.get_dims(), dtype=class_info.get_type())
self.prob_array = np.empty(
prob_info.get_dims(), dtype=prob_info.get_type())
self.rect_array = np.empty(
rect_info.get_dims(), dtype=rect_info.get_type())
return True
except Exception as e:
print("Exception: {}".format(e))
return False
def preprocess(self, img):
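        # Resize to the model input size, then normalize each channel
        # (the mean/std values are the standard ImageNet statistics).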
resized = cv2.resize(img, (HEIGHT, WIDTH))
mean = [0.485, 0.456, 0.406] # RGB
std = [0.229, 0.224, 0.225] # RGB
        img = resized.astype(np.float32) / \
            255.  # scale uint8 pixel values to floats in [0, 1]
img_a = img[:, :, 0]
img_b = img[:, :, 1]
img_c = img[:, :, 2]
# Extracting single channels from 3 channel image
# The above code could also be replaced with cv2.split(img) << which will return 3 numpy arrays (using opencv)
# normalizing per channel data:
img_a = (img_a - mean[0]) / std[0]
img_b = (img_b - mean[1]) / std[1]
img_c = (img_c - mean[2]) / std[2]
# putting the 3 channels back together:
x1 = [[[], [], []]]
x1[0][0] = img_a
x1[0][1] = img_b
x1[0][2] = img_c
# x1 = mx.nd.array(np.asarray(x1))
x1 = np.asarray(x1)
return x1
def get_number_trucks(self, class_data, prob_data):
# get indices of truck detections in class data
truck_indices = [i for i in range(
len(class_data)) if int(class_data[i]) == self.index]
        # use these indices to keep only detections at or above the confidence threshold
prob_truck_indices = [
i for i in truck_indices if prob_data[i] >= self.threshold]
return prob_truck_indices
def entry(self, inputs, outputs):
self.frame_num += 1
for i in range(len(inputs.video_in)):
stream = inputs.video_in[i]
truck_image = stream.image
stream.add_label('Number of Trucks : {}'.format(
self.number_trucks), 0.6, 0.05)
x1 = self.preprocess(truck_image)
# Do inference on the new frame.
self.model.batch(0, x1)
self.model.flush()
# Get the results.
resultBatchSet = self.model.get_result()
class_batch = resultBatchSet.get(0)
prob_batch = resultBatchSet.get(1)
rect_batch = resultBatchSet.get(2)
class_batch.get(0, self.class_array)
prob_batch.get(0, self.prob_array)
rect_batch.get(0, self.rect_array)
class_data = self.class_array[0]
prob_data = self.prob_array[0]
rect_data = self.rect_array[0]
# Get Indices of classes that correspond to truck
truck_indices = self.get_number_trucks(class_data, prob_data)
print('Truck indices is {}'.format(truck_indices))
try:
self.number_trucks = len(truck_indices)
except:
self.number_trucks = 0
# Draw Bounding Boxes on HDMI Output
if self.number_trucks > 0:
for index in truck_indices:
left = np.clip(rect_data[index]
[0] / np.float(HEIGHT), 0, 1)
top = np.clip(rect_data[index][1] / np.float(HEIGHT), 0, 1)
right = np.clip(
rect_data[index][2] / np.float(HEIGHT), 0, 1)
bottom = np.clip(
rect_data[index][3] / np.float(HEIGHT), 0, 1)
stream.add_rect(left, top, right, bottom)
stream.add_label(str(prob_data[index][0]), right, bottom)
            stream.add_label('Number of Trucks : {}'.format(
                self.number_trucks), 0.6, 0.05)
self.model.release_result(resultBatchSet)
outputs.video_out[i] = stream
return True
def main():
TruckDetection().run()
main()
|
[
"kennyf_81@hotmail.com"
] |
kennyf_81@hotmail.com
|
8b82dc345349f14a1a0cf089c1e1be081d510428
|
d056d5d0cc6b16a577d0c0feda39995696907fbf
|
/heat/tests/openstack/blazar/test_host.py
|
ab3d750a96523200aa1393bb8ed1817e01751cbf
|
[
"Apache-2.0"
] |
permissive
|
bbc/heat
|
e53063ec24e0fbc003104f38c609f18e60f1da5b
|
71ea5cbcc5e03710559c7a5635117f2bc350f0d6
|
refs/heads/add-public-auth-uri
| 2022-07-23T04:52:41.575224
| 2019-04-08T18:00:32
| 2019-04-12T14:52:33
| 183,053,092
| 0
| 1
|
Apache-2.0
| 2022-07-06T20:06:00
| 2019-04-23T16:18:31
|
Python
|
UTF-8
|
Python
| false
| false
| 6,648
|
py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from blazarclient import exception as client_exception
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import blazar
from heat.engine.resources.openstack.blazar import host
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
blazar_host_template = '''
heat_template_version: rocky
resources:
test-host:
type: OS::Blazar::Host
properties:
name: test-host
extra_capability:
gpu: true
'''
blazar_host_template_extra_capability = '''
heat_template_version: rocky
resources:
test-host:
type: OS::Blazar::Host
properties:
name: test-host
extra_capability:
gpu: true
name: test-name
'''
class BlazarHostTestCase(common.HeatTestCase):
def setUp(self):
super(BlazarHostTestCase, self).setUp()
self.host = {
"id": uuids.id,
"name": "test-host",
"gpu": True,
"hypervisor_hostname": "compute-1",
"hypervisor_type": "QEMU",
"hypervisor_version": 2010001,
"cpu_info": "{"
"'arch': 'x86_64', 'model': 'cpu64-rhel6', "
"'vendor': 'Intel', 'features': "
"['pge', 'clflush', 'sep', 'syscall', 'msr', "
"'vmx', 'cmov', 'nx', 'pat', 'lm', 'tsc', "
"'fpu', 'fxsr', 'pae', 'mmx', 'cx8', 'mce', "
"'de', 'mca', 'pse', 'pni', 'apic', 'sse', "
"'lahf_lm', 'sse2', 'hypervisor', 'cx16', "
"'pse36', 'mttr', 'x2apic'], "
"'topology': {'cores': 1, 'cells': 1, 'threads': 1, "
"'sockets': 4}}",
"memory_mb": 8192,
"local_gb": 100,
"vcpus": 2,
"service_name": "compute-1",
"reservable": True,
"trust_id": uuids.trust_id,
"created_at": "2020-01-01 08:00",
"updated_at": "2020-01-01 12:00",
"extra_capability": "foo"
}
t = template_format.parse(blazar_host_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['test-host']
self.client = mock.Mock()
self.patchobject(blazar.BlazarClientPlugin, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
self.client.host.create.return_value = self.host
return host.Host(name, snippet, stack)
def test_host_create(self):
host_resource = self._create_resource('host', self.rsrc_defn,
self.stack)
self.assertEqual(self.host['name'],
host_resource.properties.get(host.Host.NAME))
scheduler.TaskRunner(host_resource.create)()
self.assertEqual(uuids.id, host_resource.resource_id)
self.assertEqual((host_resource.CREATE, host_resource.COMPLETE),
host_resource.state)
self.assertEqual('host', host_resource.entity)
self.client.host.create.assert_called_once_with(
name=self.host['name'], gpu=self.host['gpu'])
def test_host_delete(self):
host_resource = self._create_resource('host', self.rsrc_defn,
self.stack)
scheduler.TaskRunner(host_resource.create)()
self.client.host.delete.return_value = None
self.client.host.get.side_effect = [
'host_obj', client_exception.BlazarClientException(code=404)]
scheduler.TaskRunner(host_resource.delete)()
self.assertEqual((host_resource.DELETE, host_resource.COMPLETE),
host_resource.state)
self.client.host.delete.assert_called_once_with(uuids.id)
def test_host_delete_not_found(self):
host_resource = self._create_resource('host', self.rsrc_defn,
self.stack)
scheduler.TaskRunner(host_resource.create)()
self.client.host.delete.side_effect = client_exception.\
BlazarClientException(code=404)
self.client.host.get.side_effect = client_exception.\
BlazarClientException(code=404)
scheduler.TaskRunner(host_resource.delete)()
self.assertEqual((host_resource.DELETE, host_resource.COMPLETE),
host_resource.state)
def test_parse_extra_capability(self):
t = template_format.parse(blazar_host_template_extra_capability)
stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(stack)
rsrc_defn = resource_defns['test-host']
host_resource = self._create_resource('host', rsrc_defn, stack)
args = dict((k, v) for k, v in host_resource.properties.items()
if v is not None)
parsed_args = host_resource._parse_extra_capability(args)
self.assertEqual({'gpu': True, 'name': 'test-host'}, parsed_args)
def test_resolve_attributes(self):
host_resource = self._create_resource('host', self.rsrc_defn,
self.stack)
scheduler.TaskRunner(host_resource.create)()
self.client.host.get.return_value = self.host
self.assertEqual(self.host['vcpus'],
host_resource._resolve_attribute(host.Host.VCPUS))
def test_resolve_attributes_not_found(self):
host_resource = self._create_resource('host', self.rsrc_defn,
self.stack)
scheduler.TaskRunner(host_resource.create)()
self.client.host.get.return_value = self.host
self.assertRaises(exception.InvalidTemplateAttribute,
host_resource._resolve_attribute,
'invalid_attribute')
|
[
"asmita.singh@nttdata.com"
] |
asmita.singh@nttdata.com
|
e04fac37b16be01109ed4330e8615540310fe539
|
96217d9919129b995ba2a66be0864143351b3d65
|
/Agent-Input/ssi/pipes/avemotione2e/scripts/avemotione2e.py
|
4be005adf7b2d3884d67a7b5736d813101b4614b
|
[] |
no_license
|
nadiacandoit/AVP
|
a87a84eb1e29f6575d0021fdda536cd187b1a3d9
|
5648adb3cb0059ff5d6fe27764d9a225a94ff050
|
refs/heads/master
| 2022-04-24T06:19:28.263178
| 2020-04-14T09:21:55
| 2020-04-14T09:21:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,960
|
py
|
'''
avemotione2e.py
authors: Johannes Wagner <wagner@hcm-lab.de>, Eduardo Coutinho <e.coutinho@imperial.ac.uk>
created: 2017/07/25
Copyright (C) University of Augsburg, Lab for Human Centered Multimedia
'''
import numpy as np
import math
from pathlib import Path
import time
import os
# Import tensorflow. See: https://github.com/ninia/jep/issues/81
import sys
sys.argv = ['']
#import tensorflow.python as tf
#from tensorflow.python.platform import tf_logging as logging
import tensorflow.contrib as tfc
import tensorflow as tf
import collections
import models
slim = tfc.slim
# def tf_install_and_import():
# import importlib
# try:
# print('here1')
# importlib.import_module('tensorflow')
# except ImportError:
# print('here2')
# import pip
# pip.main(['install', '--upgrade', 'https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-0.12.1-cp35-cp35m-win_amd64.whl'])
#
# finally:
# print('here3')
# globals()['tensorflow'] = importlib.import_module('tensorflow')
def get_parameters(opts,vars):
vars['seq_length'] = 25*opts['rec_length']
# Buffers to store audio and video raw signals
vars['video_buf'] = collections.deque(maxlen=vars['seq_length']) #*40
vars['audio_buf'] = collections.deque(maxlen=vars['seq_length']) #*40
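    # Sliding buffers keep the most recent seq_length frames
    # (25 fps * rec_length seconds); a prediction is made once both are full.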
# Model parameters
vars['batch_size'] = 1 # Batch size to use (always one as it will work in real-time)
vars['model'] = 'both' # Model to be used: audio,video, or audiovideo
vars['checkpoint_dir'] = './models/model.ckpt-4129' # Location of model
vars['hidden_units'] = 256 # Model parameter: number of hidden units in each LSTM layer
vars['hidden_layers'] = 2 # Model parameter: number of LSTM layers
#vars['num_examples'] = 50
return 1 # FLAGS
def load_model(vars):
frames = tf.placeholder(tf.float32, shape=(vars['batch_size'],vars['seq_length'],96, 96, 3))
audio = tf.placeholder(tf.float32, shape=(vars['batch_size'],vars['seq_length'],640))
# LSTM initial sates placeholder
initial_state_ph = tf.placeholder(tf.float32, [2, 2, vars['batch_size'], vars['hidden_units']])
l = tf.unstack(initial_state_ph, axis=0)
initial_state_t = tuple(
[ tf.nn.rnn_cell.LSTMStateTuple(l[idx][0],l[idx][1])
for idx in range(vars['hidden_layers'])]
)
init_array = np.zeros((1, vars['hidden_units']))
state_out = tuple([tf.nn.rnn_cell.LSTMStateTuple(init_array, init_array)
for idx in range(vars['hidden_layers'])])
# Create model
with slim.arg_scope(slim.nets.resnet_utils.resnet_arg_scope()):
prediction, st_o = models.get_model(vars['model'])(
frames, audio,
prev_hidden_states=initial_state_t,
hidden_units=vars['hidden_units'])
coord = tf.train.Coordinator()
variables_to_restore = slim.get_variables_to_restore()
#num_batches = int(math.ceil(vars['num_examples'] / (float(vars['batch_size'] * vars['seq_length']) )))
saver = tf.train.Saver(variables_to_restore)
sess = tf.Session()
saver.restore(sess, vars['checkpoint_dir'])
tf.train.start_queue_runners(sess=sess)
vars['tf_session'] = sess
vars['tf_state'] = state_out
vars['tf_coord'] = coord
vars['tf_prediction'] = prediction
vars['tf_st_o'] = st_o
vars['tf_frames'] = frames
vars['tf_audio'] = audio
vars['tf_initial_state_t'] = initial_state_t
return 0 #sess, coord, state_out, prediction, st_o, frames, audio, initial_state_t
def getOptions(opts,vars):
opts['rec_length'] = 5 # in case it was not set
pass
def consume_enter(sin, board, opts, vars):
get_parameters(opts,vars)
load_model(vars)
pass
def compute_av(vars,video_in,audio_in):
#
try:
pred, tf_state = vars['tf_session'].run(
[vars['tf_prediction'],vars['tf_st_o']], # [prediction],
feed_dict={vars['tf_frames']: video_in,
vars['tf_audio']: audio_in,
vars['tf_initial_state_t']: vars['tf_state']}
)
predictions = np.mean(np.reshape(pred, (-1, 2)),axis=0)
vars['tf_state'] = tf_state
except Exception as e:
print('Exception : ', e)
vars['tf_coord'].request_stop(e)
predictions = [0.0,0.0]
return predictions
def consume(info, sin, board, opts, vars):
vars['video_buf'].append(np.asarray(sin[0]) / 255)
vars['audio_buf'].append(np.asmatrix(sin[1]))
if len(vars['video_buf']) >= vars['seq_length']:
        # check length!!!!
# print('length of video buffer: ',len(vars['video_buf']))
# print('\t. shape of video frame: ',vars['video_buf'][0].shape)
# print('length of audio buffer: ',len(vars['audio_buf']))
# print('\t. shape of audio frame: ',vars['audio_buf'][0].shape)
tf_video = np.array(vars['video_buf']).reshape(1,vars['seq_length'],96, 96, 3)
tf_audio = np.array(vars['audio_buf']).reshape(1,vars['seq_length'],640)
# print(tf_audio.shape)
# print(tf_video.shape)
predictions = compute_av(vars,tf_video,tf_audio)
print('\t* Arousal: ' + str(predictions[0]))
print('\t* Valence: ' + str(predictions[1]))
# what to do if frames are received in the meantime?
vars['video_buf'].clear()
vars['audio_buf'].clear()
def consume_flush(sin, board, opts, vars):
# runs at the end
pass
|
[
"peter.lavalle@gmail.com"
] |
peter.lavalle@gmail.com
|
e5dad19d318e172e32cb54de2c4eb656a1cec17e
|
0196f8f21247672bbfca3232d0d7f6764a929ccf
|
/stack.py
|
44c201da80620d62e92c8ef64c2a95d9d64a653d
|
[] |
no_license
|
h1dd3n01/DataStructures
|
2cf40ff4872d4432dd1996b21505b5a8499e5aec
|
e18567b409fda8933e78a557a6b5e805827e023c
|
refs/heads/master
| 2022-11-04T15:38:02.788370
| 2020-06-13T12:09:51
| 2020-06-13T12:09:51
| 268,105,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
class Node:
def __init__(self, data=None):
self.data = data
self.next = None
class Stack:
def __init__(self):
self.top = None
self.size = 0
def push(self, data):
node = Node(data)
if self.top:
node.next = self.top
self.top = node
else:
self.top = node
self.size += 1
    def pop(self):
        data = None
        if self.top:
            data = self.top.data
            # self.top.next is either the next node or None, so this single
            # assignment covers both the multi-node and last-node cases
            self.top = self.top.next
            self.size -= 1
        return data
def peek(self):
if self.top:
return self.top.data
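
# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == '__main__':
    s = Stack()
    for item in (1, 2, 3):
        s.push(item)
    assert s.peek() == 3   # top of the stack, without removing it
    assert s.pop() == 3    # removes and returns the top element
    assert s.size == 2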
|
[
"h1dd3n1@protonmail.com"
] |
h1dd3n1@protonmail.com
|
48d667cf4151d200ef909033d9daf17f9fba6c22
|
d10819b0048e9365743d92526875fee84ec1396a
|
/environments/forest.py
|
93cea186fbabdd0e9e18ef8995bdf69d783f8af8
|
[] |
no_license
|
nss-day-cohort-38/keahua-arboretum-the-bluebell-chateau
|
2c4b0f42e9c403ad13c3eac7c8b188263241f2bd
|
33597d153231de8ebab9a67f0fca5c96ce6edc1c
|
refs/heads/master
| 2022-04-25T14:01:32.454552
| 2020-04-28T16:01:22
| 2020-04-28T16:01:22
| 257,677,095
| 1
| 3
| null | 2020-04-28T16:01:24
| 2020-04-21T18:10:49
|
Python
|
UTF-8
|
Python
| false
| false
| 741
|
py
|
from interfaces import Identifiable
from interfaces import IContainsAnimals
from interfaces import IContainsPlants
class Forest(IContainsAnimals, IContainsPlants, Identifiable):
def __init__(self):
IContainsAnimals.__init__(self)
IContainsPlants.__init__(self)
Identifiable.__init__(self)
def add_animal(self, animal):
try:
self.animals.append(animal)
except AttributeError:
raise AttributeError("Error!")
def add_plant(self, plant):
try:
self.plants.append(plant)
except AttributeError:
raise AttributeError("Error!")
def __str__(self):
return(f"Forest [{str(self.id)[:8]}]")
# print(type(self.id))
|
[
"Jcsullins222@yahoo.com"
] |
Jcsullins222@yahoo.com
|
7d11029c0010795c311bf829960b396620d45f52
|
235c4b3aa630737b379050a420923efadd432da8
|
/DS-400/Medium/11-Container With Most Water/TwoPointers.py
|
86b3c14f907e1ef831bd4c6893ed2b21009cd60d
|
[
"MIT"
] |
permissive
|
ericchen12377/Leetcode-Algorithm-Python
|
4e5dc20062280ef46194da5480600b2459fd89f8
|
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
|
refs/heads/master
| 2023-02-22T22:43:55.612650
| 2021-01-28T04:00:20
| 2021-01-28T04:00:20
| 258,058,468
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
class Solution(object):
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
"""
left, right = 0, len(height) - 1
area = 0
while left < right:
area = max(area, min(height[left], height[right]) * (right - left))
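            # Move the pointer at the shorter line inward: the area is capped
            # by the shorter side, so keeping it while the width shrinks can
            # never produce a larger container.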
            if height[left] < height[right]:
left += 1
else:
right -= 1
return area
height = [1, 2, 1]
p = Solution()
print(p.maxArea(height))
|
[
"suiyaochen@mail.usf.edu"
] |
suiyaochen@mail.usf.edu
|
753981c7539428d4d187a2142f68f6200e4204a5
|
bedc747151dbc1e816e562b6753ba44e47a6f378
|
/sketch_generation_model/sketch_rnn/utils/misc.py
|
ff2c463b24f8189f4f8ac2182c9fb39aadf8a286
|
[] |
no_license
|
auswls/CS470
|
58f7d2924cf8131e761b52f63a06198da7f4c3f2
|
c65a1c57eb2e9d1384b8aa1aeb09241aa0e14334
|
refs/heads/master
| 2023-01-28T10:43:37.326936
| 2020-12-12T21:52:32
| 2020-12-12T21:52:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
"""
SketchRNN data loading and image manipulation utilities.
"""
import numpy as np
import torch
def get_max_len(strokes):
"""Return the maximum length of an array of strokes."""
max_len = 0
for stroke in strokes:
ml = len(stroke)
if ml > max_len:
max_len = ml
return max_len
def to_tensor(x):
if isinstance(x, torch.Tensor):
pass
elif isinstance(x, np.ndarray):
x = torch.from_numpy(x)
else:
raise Exception('input must be a tensor or ndarray.')
return x.float()
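
# Illustrative usage (not part of the original module):
#   get_max_len([np.zeros((5, 2)), np.zeros((3, 2))])  # -> 5
#   to_tensor(np.zeros((2, 2))).dtype                   # -> torch.float32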
|
[
"vm3fk5dla2@kaist.ac.kr"
] |
vm3fk5dla2@kaist.ac.kr
|
9e06b0e76c2a4928edfb33c89dd56402f2b0323f
|
8b35d2308c23dfe564bd5260766daa6cf293d0a9
|
/snippets.py
|
bc55da826164a7a0b5ec1e517a1dad82af2871cc
|
[] |
no_license
|
darkrecher/leek
|
516264b762bcb374ca8467282854bd749902cfd6
|
0a1e4f4fb038c978a33a808e3984b4f81bc20f23
|
refs/heads/master
| 2021-04-05T23:53:54.112008
| 2018-03-09T16:12:39
| 2018-03-09T16:12:39
| 124,448,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,953
|
py
|
def read_file(pathfile):
with open(pathfile, 'r', encoding='utf-8') as file:
return file.read()
def read_file_by_lines(pathfile):
with open(pathfile, 'r', encoding='utf-8') as file:
for file_line in file:
print(file_line)
def write_file(pathfile, value):
with open(pathfile, 'a', encoding='utf-8') as file:
return file.write(value)
(
pl_guess, pl_int, pl_hex, pl_hexu, pl_hexl, pl_hexu_space, pl_hexl_space, pl_asc, pl_ut8,
pl_lint, pl_lasc, pl_lut8, pl_asc_lint, pl_asc_lhex, pl_all
) = range(15)
def _asc_to_lint(data):
return list(data.encode('ascii', 'replace'))
def _ut8_to_lint(data):
return list(data.encode('utf-8', 'replace'))
def _lasc_to_lint(datas):
data = ''.join(datas)
return list(data.encode('ascii', 'replace'))
def _lut8_to_lint(datas):
data = ''.join(datas)
return list(data.encode('utf-8', 'replace'))
def _int_to_lint(data):
lint = []
while data:
lint.insert(0, data % 256)
data = data // 256
return lint
def _hex_to_lint(data):
hex_digits_half_1 = data[::2]
hex_digits_half_2 = data[1::2]
hex_digits = zip(hex_digits_half_1, hex_digits_half_2)
lint = [
int('%s%s' % (dig_1, dig_2), 16)
for dig_1, dig_2
in hex_digits ]
return list(lint)
def _asc_lint_to_lint(asc_lint):
    # str.maketrans requires equal-length arguments: map each separator to a space
    asc_lint = asc_lint.translate(str.maketrans(",.-_;", " " * 5))
asc_lint_splitted = asc_lint.split()
return list([int(elem) for elem in asc_lint_splitted])
def _asc_lhex_to_lint(asc_lhex):
    # str.maketrans requires equal-length arguments: map each separator to a space
    asc_lhex = asc_lhex.translate(str.maketrans(",.-_;", " " * 5))
asc_lhex_splitted = asc_lhex.split()
return list([int(elem, 16) for elem in asc_lhex_splitted])
def _lint_to_int(lint):
big_int = 0
for cur_int in lint:
big_int *= 256
big_int += cur_int
return str(big_int)
def _lint_to_lhex(lint):
lhex = [
hex(cur_int)[2:].rjust(2, '0')
for cur_int in lint
]
return lhex
def _lint_to_hexu(lint):
lhex = _lint_to_lhex(lint)
return ''.join(lhex).upper()
def _lint_to_hexl(lint):
lhex = _lint_to_lhex(lint)
return ''.join(lhex).lower()
def _lint_to_hexu_space(lint):
lhex = _lint_to_lhex(lint)
return ' '.join(lhex).upper()
def _lint_to_hexl_space(lint):
lhex = _lint_to_lhex(lint)
return ' '.join(lhex).lower()
def _lint_to_asc(lint):
return str((bytes(lint)).decode("ascii", "replace"))
def _lint_to_ut8(lint):
return str((bytes(lint)).decode("utf-8", "replace"))
DICT_FUNCTION_IN_FROM_PL_TYPE = {
pl_int: _int_to_lint,
pl_hex: _hex_to_lint,
pl_hexu: _hex_to_lint,
pl_hexl: _hex_to_lint,
pl_asc: _asc_to_lint,
pl_ut8: _ut8_to_lint,
pl_lint: lambda data: data,
pl_lasc: _lasc_to_lint,
pl_lut8: _lut8_to_lint,
pl_asc_lint: _asc_lint_to_lint,
pl_asc_lhex: _asc_lhex_to_lint,
}
DICT_FUNCTION_OUT_FROM_PL_TYPE = {
pl_int: ('entier', _lint_to_int),
pl_hexu: ('hexa upcase', _lint_to_hexu),
pl_hexl: ('hexa lowcase', _lint_to_hexl),
pl_hexu_space: ('hexa-space upcase', _lint_to_hexu_space),
pl_hexl_space: ('hexa-space lowcase', _lint_to_hexl_space),
pl_asc: ('str ascii', _lint_to_asc),
pl_ut8: ('str utf-8', _lint_to_ut8),
pl_lint: ('liste entier', lambda data: str(data)),
}
PL_TYPES_FOR_PL_ALL = (pl_int, pl_lint, pl_hexu, pl_hexl, pl_hexu_space, pl_hexl_space, pl_asc, pl_ut8)
label_length_max = max([
len(value[0])
for key, value
in DICT_FUNCTION_OUT_FROM_PL_TYPE.items()
])
def _only_allowed_chars(str_data, allowed_chars):
unauthorized_chars = set(str_data) - set(allowed_chars)
return not bool(unauthorized_chars)
def _guess(data):
if isinstance(data, (list, tuple)):
if all([ isinstance(elem, int) for elem in data ]):
return pl_lint
if all([ isinstance(elem, str) for elem in data ]):
data = ''.join(data)
            # No encoding detection: it's either ascii or utf-8. Too bad if it blows up later.
try:
data.encode('ascii')
return pl_lasc
except:
return pl_lut8
if isinstance(data, str):
if _only_allowed_chars(data, '0123456789abcdefABCDEF'):
return pl_hex
if _only_allowed_chars(data, ',.-_;0123456789 '):
return pl_asc_lint
if _only_allowed_chars(data, ',.-_;0123456789 abcdefABCDEF'):
return pl_asc_lhex
        # Still no encoding detection
try:
data.encode('ascii')
return pl_asc
except:
return pl_ut8
if isinstance(data, int):
return pl_int
return None
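# plop() first normalizes `data` to a list of byte values ("lint") using the
# input converters above, then renders it in the requested output format
# (or in every format from PL_TYPES_FOR_PL_ALL when pl_type_out is pl_all).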
def plop(data, pl_type_out=pl_all, pl_type_in=pl_guess):
if pl_type_in == pl_guess:
pl_type_in = _guess(data)
if pl_type_in is None:
raise Exception("Fail guess")
function_in = DICT_FUNCTION_IN_FROM_PL_TYPE.get(pl_type_in)
    if function_in is None:
raise Exception("Fail arguments pl_type_in")
lint = function_in(data)
if pl_type_out == pl_all:
print('')
for pl_type_out_current in PL_TYPES_FOR_PL_ALL:
label, function_out = DICT_FUNCTION_OUT_FROM_PL_TYPE[pl_type_out_current]
try:
print('%s : %s' % (label.ljust(label_length_max), function_out(lint)))
except:
print('%s : %s' % (label.ljust(label_length_max), 'fail'))
print('')
else:
out_infos = DICT_FUNCTION_OUT_FROM_PL_TYPE.get(pl_type_out)
if out_infos is None:
raise Exception("Fail arguments pl_type_out")
function_out = out_infos[1]
return function_out(lint)
# Code snippets for doing ssh and snmp through an ssh jump host.
# def start_ssh():
# self.ssh_client = paramiko.SSHClient()
# self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# # https://docs.python.org/2/library/getpass.html
# password = getpass.getpass('Please enter the password for the SSH connection: ')
# self.ssh_client.connect(ip, port=port, username=username, password=password)
#
# def config(
# self, version='v2c', community='NAGIOS', walker_ip='194.199.57.51', walker_port=50161,
# oid_prefix_in='1.3.6.1.4.1.42229.6.22.', line_prefix_out='SNMPv2-SMI::enterprises.42229.6.22.'
# ):
# """
# The oid_prefix_in must end with a '.', otherwise it does not work.
# The line_prefix_out must match the oid_prefix_in.
# """
# self.version = version
# self.community = community
# self.walker_ip = walker_ip
# self.walker_port = walker_port
# self.oid_prefix_in = oid_prefix_in
# self.line_prefix_out = line_prefix_out
# param_commands = (self.version, self.community, self.walker_ip, str(self.walker_port))
# self._walk_commmand = 'snmpwalk -%s -c %s %s:%s ' % param_commands
# self._get_commmand = 'snmpget -%s -c %s %s:%s ' % param_commands
# self._set_commmand = 'snmpset -%s -c %s %s:%s ' % param_commands
#
# def test(self):
# stdin, stdout, stderr = self.ssh_client.exec_command('ls -l')
# logger.info(''.join(stdout))
if __name__ == '__main__':
plop(123456)
print('-' * 10)
plop('deadBEEF')
print('-' * 10)
plop('tralala;$_pouet')
print('-' * 10)
plop('abcdéèê')
print('-' * 10)
plop('αβ')
print('-' * 10)
plop(list(range(41)))
print('-' * 10)
plop(('a', 'b', 'c'))
print('-' * 10)
plop(('é', 'è', 'ñ'))
print('-' * 10)
print(plop('deadbeef', pl_lint, pl_hex))
print('-' * 10)
    # Meh... but we don't need this one.
plop('a1,b2,100')
|
[
"wlanglois@imsnetworks.com"
] |
wlanglois@imsnetworks.com
|
6767929a79aa8aeed73feeab5279cf0591d10629
|
76aec59e63d34e8e4826dc33483c1a1a4c5264f8
|
/fpga_mnn.py
|
edbf905a67e79d2e415aa98bdc2a6f0c0bc2dfad
|
[] |
no_license
|
fzp0412/mnist_dnn
|
80f011e6fdd490c77f731ce7e0ab213a32ecd409
|
a16d867bba8a97e4e0813f4e4483a9550a6b7608
|
refs/heads/master
| 2021-09-06T01:16:53.234205
| 2018-02-01T08:22:42
| 2018-02-01T08:22:42
| 113,994,006
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,017
|
py
|
import numpy as np
from scipy import misc
import mnn_parameter
'''
Three-layer NN with two hidden layers:
hidden layer 1: 128 units, ReLU activation
hidden layer 2: 64 units, ReLU activation
relu(x) = 0 when x < 0, x when x >= 0
Output layer activation: softmax
softmax(z_i) = exp(z_i) / sum_j(exp(z_j))
Loss function: cross entropy
L = -sum(label*ln(y) + (1-label)*ln(1-y))
'''
'''
global value
'''
class_num = 10
hide1_num = 128
hide2_num = 64
'''
use numpy load mnist data from mnist.npz
'''
def load_data(path='mnist.npz'):
f = np.load(path)
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
f.close()
return (x_train, y_train), (x_test, y_test)
'''
test function: input data, labels and output scores z
output: accuracy (correct rate)
'''
def test_fun(data,label,z):
sum_num = label.shape[0]
right_num = 0
i=0
for i in range (sum_num) :
last_num = np.argmax(z[i])
if last_num == label[i]:
right_num +=1
return right_num/sum_num
'''
ReLU function (elementwise, modifies x in place):
outputs 0 where X_ij < 0,
otherwise X_ij
'''
def relu_fun(x):
x[x<0]=0
return x
'''
output layer function
input x.shape(num,784)
input w1.shape(784,hide1_num)
input w2.shape(hide1_num,hide2_num)
input w3.shape(hide2_num,class_num)
output z.shape(num,class_num)
'''
def output_layer(x,w1,w2,w3):
z1 = np.dot(x,w1)
x2 = relu_fun(z1)
z2 = np.dot(x2,w2)
x3 = relu_fun(z2)
z3 = np.dot(x3,w3)
return x2,x3,z1,z2,z3
'''
last recognition function
input x.shape(num,784)
input w1.shape(784,hide1_num)
input w2.shape(hide1_num,hide2_num)
input w3.shape(hide2_num,class_num)
output z.shape(num,class_num)
output sum_exp_z.shape(num,1)
output y.shape(num,class_num)
'''
def recognition_fun(x,w1,w2,w3):
x2,x3,z1,z2,z3 = output_layer(x,w1,w2,w3)
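    # Plain exp/sum softmax over the output logits z3; subtracting the
    # per-sample max before exponentiating would be the numerically
    # stable variant.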
exp_z = np.exp(z3)
exp_z =exp_z.T
y = exp_z/(exp_z.sum(axis =0))
y=y.T
return (x2,x3,z1,z2,z3,y)
'''
loss function
loss = -(label*log(y) + (1-label)*log(1-y))
'''
def loss_fun(y,label):
one = np.ones((label.shape))
loss = -(label*np.log(y)+(one-label)*np.log(one-y))
loss = loss.sum()/label.shape[0]
return loss
'''
get parameters: unpack the stored weight matrices w1, w2, w3
'''
def get_para(params):
new_paras =[]
for param in params:
if param :
new_paras.append(param)
arrs = np.array(new_paras)
w1 = arrs[0][0]
w2 = arrs[1][0]
w3 = arrs[2][0]
return w1,w2,w3
'''
main function
'''
def run():
(x_train, y_train), (x_test, y_test) = load_data()
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_test /= 255
w1 =np.random.randn(784,hide1_num)
w2 =np.random.randn(hide1_num,hide2_num)
w3 =np.random.randn(hide2_num,class_num)
params = mnn_parameter.parameter
w1,w2,w3 =get_para(params)
tax2,tax3,taz1,taz2,taz3,tay = recognition_fun(x_test,w1,w2,w3)
tacc = test_fun(x_test,y_test,taz3)
print(tacc)
if __name__ =='__main__':
run()
|
[
"fzp0412@163.com"
] |
fzp0412@163.com
|
f7b39c281141f03aad1cab338612855d22d4bc15
|
a5df37342f3554789c9ac67e821ba206e2539676
|
/postgresql/Backup_postgresql.py
|
8022da4f9e5a865503a3eb86eb2a7d9423c1204a
|
[] |
no_license
|
vincent119/Python-Script
|
c92ac30088c17b78281402f5f71b1af287870004
|
e7191834dd3a59b0e97f28803b3ebf90bcf1ed14
|
refs/heads/master
| 2021-01-23T20:40:04.429504
| 2019-07-02T06:10:11
| 2019-07-02T06:10:11
| 44,096,268
| 0
| 1
| null | 2018-09-28T11:58:41
| 2015-10-12T09:06:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,300
|
py
|
#!/usr/bin/env python
#coding=utf8
import smtplib
import HTML
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import subprocess
import sys
import os
import time
import datetime
from time import strftime
from datetime import datetime, timedelta
from pyhtml import *
# Backup file path
bkDir='/Rmanbackup1/edb'
# Path to the postgres executables
pgPath='/edb/9.5AS/bin'
domainName='domain.com.tw'
superUser='enterprisedb'
##################
mailFROM = 'mailfrom@domain.com.tw'
mailTO = ['user1@domain.com.tw','user2@domain.com.tw']
mailSUBJECT = u'EDB backup Report'
SERVER = 'localhost'
##################
DBlist = [['db-01','postgres'],['db-02','postgres'],['edb-03','postgres']]
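# Each entry is [server_hostname, database_name]; the hostname is joined with
# domainName to form the pg_dump connection target.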
Status=[]
def RunCmd(cmdarg):
cmd=cmdarg
retcode = subprocess.call(cmd, shell=True)
if retcode != 0:
sys.exit(retcode)
else:
return retcode
def sizeof_fmt(num, suffix='B'):
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s %s" % (num, 'Yi', suffix)
def genhtmlreport(today,enDdate,Status):
starttime=today
endtime=enDdate
TEXT ='<!doctype html public '"-//w3c//dtd html 4.0 transitional//en"'><html><head><meta http-equiv='"Content-Type"' content='"text/html; charset=utf-8"'></head><body> <style>table {width:100%;}table, th, td {border: 1px solid black;border-collapse: collapse;}th, td {padding: 5px;text-align: left;}table#t01 tr:nth-child(even) {background-color: #eee;}table#t01 tr:nth-child(odd) {background-color:#fff;}table#t01 th {background-color: green;color: white;}</style></head><body>'
TEXT=TEXT+'<table id="t01">'+'Start Time: '+today+'</p>'
htmlcode = HTML.table(Status, header_row=['Server Name','DB Name','Run Time','DB Size'])
TEXT=TEXT+htmlcode+'</p>'+'End Time: '+enDdate+'</table></body></html>'
sendmail(TEXT)
def sendmail(RepHtml):
msg = MIMEMultipart('alternative')
msg['Subject'] = mailSUBJECT
msg['From'] = mailFROM
msg['To'] = ', '.join(mailTO)
part = MIMEText(RepHtml, 'html', 'utf-8')
msg.attach(part)
server = smtplib.SMTP(SERVER)
server.sendmail(mailFROM, mailTO, msg.as_string().encode('ascii'))
server.quit()
def tr(serverdata,htmdata):
fielddata=htmdata
fieldserver=serverdata
Table="<tr>"
for field in serverdata:
Table=Table+"<td>"+fielddata[fieldserver]+"</td>"
Table=Table+"</tr>"
return Table
def backupStart():
today=(datetime.now() + timedelta(days=0)).strftime('%Y-%m-%dT%H:%M')
for row in DBlist:
dbserver=row[0]
dbname=row[1]
dirpath=bkDir+'/'+dbserver
if os.path.exists(dirpath) == False:
os.mkdir(dirpath)
filepath=bkDir+'/'+dbserver+'/'+dbname+'.gz'
cmd=pgPath+'/pg_dump -h '+dbserver+'.'+domainName+' -U '+superUser+' -d '+dbname+' | gzip > ' +filepath
start = time.time()
RunCmd(cmd)
roundtrip = (time.time() - start)
Status.append([dbserver,dbname,format(roundtrip,'.2f'),str(sizeof_fmt(os.path.getsize(filepath)))])
enDdate=(datetime.now() + timedelta(days=0)).strftime('%Y-%m-%dT%H:%M')
genhtmlreport(today,enDdate,Status)
if __name__ == "__main__":
backupStart()
|
[
"vincent119@gmail.com"
] |
vincent119@gmail.com
|
d259a66a39d35b391fa8abaf2bc38ebe5f467633
|
ed55ecbf4f90022dc067c8a79f57c299e74849ea
|
/IV Lecture/Task/Tarea3_andrey_arguedas_ej2.py
|
bd2ef237dc7905275e9727370766a7eb7d356ce3
|
[] |
no_license
|
AndreyArguedas/Data-Science-Course
|
336d7e6ede090bac13f9dacadb4f5beed79d4396
|
8e4d02205fff3479ba7fcbf7e95c6270e297a133
|
refs/heads/master
| 2020-04-24T16:41:21.296047
| 2019-06-09T17:39:04
| 2019-06-09T17:39:04
| 172,117,673
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,690
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 24 20:04:59 2019
@author: Andrey
"""
from abc import ABCMeta, abstractmethod
# Abstract class (ABC base class)
class Base(metaclass = ABCMeta):
@abstractmethod
def __str__(self):
pass
@abstractmethod
def Captura(self):
pass
class Vuelo(Base):
def __init__(self, numero = 0, hora_salida = 0, hora_llegada = 0):
self.__numero = numero
self.__hora_salida = hora_salida
self.__hora_llegada = hora_llegada
@property
def numero(self):
return self.__numero
@numero.setter
def numero(self, numero):
self.__numero = numero
@property
def hora_salida(self):
return self.__hora_salida
@hora_salida.setter
def hora_salida(self, hora_salida):
self.__hora_salida = hora_salida
@property
def hora_llegada(self):
return self.__hora_llegada
@hora_llegada.setter
def hora_llegada(self, hora_llegada):
self.__hora_llegada = hora_llegada
def __str__(self):
return "Numero de vuelo:%i\nHora de Salida:%i\nHora de Llegada:%i" % (self.numero,self.hora_salida, self.hora_llegada)
def Captura(self):
self.numero = int(input("Digite el numero del vuelo: "))
self.hora_salida = int(input("Digite la hora de salida:"))
self.hora_llegada = int(input("Digite la hora de llegada:"))
class VueloCarga(Vuelo):
def __init__(self, numero = 0, hora_salida = 0, hora_llegada = 0, peso_maximo = 0):
super().__init__( numero, hora_salida, hora_llegada)
self.__peso_maximo = peso_maximo
@property
def peso_maximo(self):
return self.__peso_maximo
@peso_maximo.setter
def peso_maximo(self, peso_maximo):
self.__peso_maximo = peso_maximo
def __str__(self):
s = super().__str__()
s += "\nPeso Máximo:%f\n"
return s % (self.peso_maximo)
def Captura(self):
Vuelo.Captura(self)
self.peso_maximo = int(input("Digite el peso máximo:"))
class VueloComercial(Vuelo):
    def __init__(self, numero = 0, hora_salida = 0, hora_llegada = 0, pasajeros = None):
        super().__init__(numero, hora_salida, hora_llegada)
        # Avoid a mutable default argument: a shared default list would leak
        # passengers across flight instances.
        self.__pasajeros = pasajeros if pasajeros is not None else []
@property
def pasajeros(self):
return self.__pasajeros
@pasajeros.setter
    def pasajeros(self, pasajeros):
self.__pasajeros = pasajeros
def monto_total_vendido(self):
acum = 0
for pasajero in self.pasajeros:
acum += pasajero.total_pagar()
return acum
def __str__(self):
s = "***************** Vuelo Comercial ************\n"
s += "=====Pasajeros====="
for pasajero in self.pasajeros:
s = s +"\n\n"+ str(pasajero)
s = s + "\n================"
return s
def Captura(self):
numeroPasajeros = int(input("Numero de pasajeros a ingresar:"))
for i in range(numeroPasajeros):
pasajero = Pasajero()
pasajero.Captura()
self.pasajeros.append(pasajero)
class VueloLocal(VueloComercial):
def __init__(self, numero = 0, hora_salida = 0, hora_llegada = 0, minimo_pasajeros = 0, porcentajeImpuesto = 0):
super().__init__(numero, hora_salida, hora_llegada)
self.__minimo_pasajeros = minimo_pasajeros
@property
def minimo_pasajeros(self):
return self.__minimo_pasajeros
@minimo_pasajeros.setter
def minimo_pasajeros(self, minimo_pasajeros):
self.__minimo_pasajeros = minimo_pasajeros
def __str__(self):
s = super().__str__()
s += "\nMinimo de pasajeros:%i\n"
return s % (self.minimo_pasajeros)
def Captura(self):
Vuelo.Captura(self)
self.minimo_pasajeros = int(input("Digite el mínimo de pasajeros:"))
class VueloInternacional(VueloComercial):
def __init__(self, numero = 0, hora_salida = 0, hora_llegada = 0, pais_destino = "no definido", porcentajeImpuesto = 0):
super().__init__( numero, hora_salida, hora_llegada)
self.__pais_destino = pais_destino
@property
def pais_destino(self):
return self.__pais_destino
@pais_destino.setter
def pais_destino(self, pais_destino):
self.__pais_destino = pais_destino
def __str__(self):
s = super().__str__()
s += "\nPaís de destino:%s\n"
return s % (self.pais_destino)
def Captura(self):
Vuelo.Captura(self)
self.pais_destino = input("Digite el país de destino:")
class Pasajero(Base):
def __init__(self, codigo = "", nombre = "", precio_boleto = 0, porcentajeImpuesto= 0):
self.__codigo = codigo
self.__nombre = nombre
self.__precio_boleto = precio_boleto
self.__porcentajeImpuesto = porcentajeImpuesto
@property
def codigo(self):
return self.__codigo
@codigo.setter
def codigo(self, codigo):
self.__codigo = codigo
@property
def nombre(self):
return self.__nombre
@nombre.setter
def nombre(self, nombre):
self.__nombre = nombre
@property
def precio_boleto(self):
return self.__precio_boleto
@precio_boleto.setter
def precio_boleto(self, precio_boleto):
self.__precio_boleto = precio_boleto
@property
def porcentajeImpuesto(self):
return self.__porcentajeImpuesto
@porcentajeImpuesto.setter
def porcentajeImpuesto(self, porcentajeImpuesto):
self.__porcentajeImpuesto = porcentajeImpuesto
def total_pagar(self):
return self.precio_boleto + self.porcentajeImpuesto * self.precio_boleto
def __str__(self):
return "Codigo:%s\nNombre:%s\nPrecio Boleto:%f\nPorcentaje Impuesto:%f" % (self.codigo,self.nombre, self.precio_boleto, self.porcentajeImpuesto)
def Captura(self):
self.codigo = input("Digite el codigo del pasajero:")
self.nombre = input("Digite el nombre del pasajero:")
self.precio_boleto = float(input("Digite el precio del tiquete:"))
self.porcentajeImpuesto = float(input("Digite el porcentaje del impuesto:"))
class PasajeroFrecuente(Pasajero):
def __init__(self, codigo = "", nombre = "", precio_boleto = 0, porcentajeImpuesto = 0, descuento = 0.2):
super().__init__(codigo, nombre, precio_boleto, porcentajeImpuesto)
self.__descuento = descuento
@property
def descuento(self):
return self.__descuento
@descuento.setter
def descuento(self, descuento):
self.__descuento = descuento
def total_pagar(self):
return self.precio_boleto + self.porcentajeImpuesto * self.precio_boleto - (self.precio_boleto * self.descuento)
def __str__(self):
s = super().__str__()
s += "\nDescuento:%s\n"
return s % (self.descuento)
def Captura(self):
Pasajero.Captura(self)
self.descuento = input("Digite el descuento del pasajero:")
import os
class Lectura:
def LeeDatosVuelo(self):
vuelo = Vuelo()
        os.system('cls')  # on Windows
print("***Bienvenido a ingresar un vuelo***")
vuelo.Captura()
return vuelo
def LeeDatosVueloCarga(self):
vuelo = VueloCarga()
        os.system('cls')  # on Windows
print("***Bienvenido a ingresar un vuelo de carga***")
vuelo.Captura()
return vuelo
def LeeDatosVueloInternacional(self):
vuelo = VueloInternacional()
        os.system('cls')  # on Windows
print("***Bienvenido a ingresar un vuelo internacional***")
vuelo.Captura()
return vuelo
def LeeDatosVueloLocal(self):
vuelo = VueloLocal()
        os.system('cls')  # on Windows
print("***Bienvenido a ingresar un vuelo internacional***")
vuelo.Captura()
return vuelo
class App:
def __init__(self):
self.__lista = list()
self.__lec = Lectura()
def __menu(self):
print("\n"*50)
        os.system('cls')  # on Windows
print(" ==================================================== ")
print(" [1] Insertar Vuelo ")
print(" [2] Insertar Vuelo de carga")
print(" [3] Insertar Vuelo Internacional")
print(" [4] Insertar Vuelo Local")
print(" [5] Ver la Lista Polimorfica" )
print(" [6] Borrar la Lista Polimorfica")
print(" [7] Salir")
print(" ==================================================== ")
return input("> ")
def __mostrarLista(self):
print("\n"*50)
        # os.system('Clear')  # os.system('cls')  # on Windows
for i in range(len(self.__lista)):
print(self.__lista[i])
print(15 * "*" + "\n")
def principal(self):
respuesta = ""
while respuesta != "7":
respuesta = self.__menu()
if respuesta == "1":
self.__lista.append(self.__lec.LeeDatosVuelo())
elif respuesta == "2":
self.__lista.append(self.__lec.LeeDatosVueloCarga())
elif respuesta == "3":
self.__lista.append(self.__lec.LeeDatosVueloInternacional())
elif respuesta == "4":
self.__lista.append(self.__lec.LeeDatosVueloLocal())
elif respuesta == "5":
self.__mostrarLista()
input("Digite cualquier tecla para continuar...")
elif respuesta == "6":
self.__lista.clear()
prueba = App()
prueba.principal()
|
[
"andrey.arguedas.espinoza@est.una.ac.cr"
] |
andrey.arguedas.espinoza@est.una.ac.cr
|
c1873401ca4c74b9e6da57a7625939420c0ccad7
|
d6580417a7d161a34446cb565c406841f311e3a5
|
/pate_demo/params.py
|
8d32053f0aa63ffb7f7441047bb144f4f85fec0e
|
[] |
no_license
|
Songpei-Lu/capc-demo
|
b95e5d36c1444ae0cd90a810931c0f3007daeda8
|
511053e4917fdbcf90ff063b7a5ea3726a9010f1
|
refs/heads/main
| 2023-09-06T04:45:04.802279
| 2021-11-10T15:41:14
| 2021-11-10T15:41:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52,120
|
py
|
from getpass import getuser
import argparse
import os
from argparse import ArgumentParser
import utils
from models.utils_models import model_size
from models.utils_models import set_model_size
import numpy as np
# from datasets.xray.dataset_pathologies import \
# get_chexpert_intersect_padchest_idexes
from datasets.xray.dataset_pathologies import \
get_padchest_intersect_chexpert_indexes
from datasets.xray.dataset_pathologies import get_chexpert_indexes
# import getpass
# user = getpass.getuser()
def get_parameters():
user = getuser()
    noise_multiplier_dpsgd = 1
    clip_dpsgd = 1
    batch_size_dpsgd = 128
    dpsgd_enable = False
bool_params = []
bool_choices = ["True", "False"]
timestamp = utils.get_timestamp()
# commands = ['train_private_models']
# commands = ["query_ensemble_model", "retrain_private_models"]
commands = ['query_ensemble_model']
# commands = ['evaluate_big_ensemble_model']
# commands = ['retrain_private_models']
# commands = ['evaluate_ensemble_model']
# commands = ['test_models']
# commands = ['set_taus']
# commands = ['train_model_adaptively']
# commands = ['basic_model_stealing_attack']
# commands = ['adaptive_queries_only']
# dataset = 'mnist'
# dataset = 'fashion-mnist'
# dataset = 'cifar10'
# dataset = 'cifar100'
# dataset = 'svhn'
# dataset = 'chexpert'
# dataset = 'retinopathy'
# dataset = 'celeba'
# dataset = 'coco'
# dataset = "cxpert"
dataset = 'mnist'
# dataset = 'padchest'
# dataset = 'mimic'
# dataset = 'vin'
# pick_labels = [0, 1, 2, 3, 4]
pick_labels = None
num_querying_parties = 3
taskweights = False
xray_views = [""]
xray_datasets = ["cxpert", "padchest", "mimic", "vin"]
adam_amsgrad = False
dataset_type = "balanced"
# dataset_type = 'imbalanced'
# balance_type = 'perfect'
balance_type = "standard"
vote_type = "probability"
optimizer = "SGD"
log_every_epoch = 0
# debug = True
debug = False
if debug:
num_workers = 0
else:
num_workers = 8
begin_id = 0
momentum = 0.9
scheduler_type = "ReduceLROnPlateau"
scheduler_milestones = None
loss_type = "CE"
num_models = 10
default_model_size = None
if num_workers > 0:
device_ids = [0, 1, 2, 3]
# device_ids = [0, 1, 2]
else:
device_ids = [0]
# device_ids = [1]
querying_party_ids = [0, 1, 2]
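    # Confident-GNMax parameters for PATE: `threshold` is the noisy vote-count
    # bar a query must clear, `sigma_threshold` is the noise scale of that
    # check, and `sigma_gnmax` is the noise scale of the noisy argmax over the
    # teacher votes; all three are scaled up with the ensemble size.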
if num_models == 1:
threshold, sigma_threshold, sigma_gnmax = [0.01, 0.01, 0.01]
elif num_models == 5:
threshold, sigma_threshold, sigma_gnmax = [5.0, 3.0, 1.0]
elif num_models == 10:
threshold, sigma_threshold, sigma_gnmax = [10.0, 6.0, 2.0]
elif num_models == 50:
threshold, sigma_threshold, sigma_gnmax = [50.0, 30.0, 7.0]
elif num_models == 100:
threshold, sigma_threshold, sigma_gnmax = [135.0, 65.0, 25.0]
elif num_models == 150:
threshold, sigma_threshold, sigma_gnmax = [190.0, 110.0, 30.0]
elif num_models == 200:
threshold, sigma_threshold, sigma_gnmax = [245.0, 155.0, 35.0]
elif num_models == 250:
threshold, sigma_threshold, sigma_gnmax = [300.0, 200.0, 40.0]
elif num_models == 300:
threshold, sigma_threshold, sigma_gnmax = [355.0, 245.0, 50.0]
elif num_models == 400:
threshold, sigma_threshold, sigma_gnmax = [450.0, 300.0, 60.0]
else:
raise Exception(f"Unsupported number of models: {num_models}.")
multilabel_prob_threshold = 0.5
sigma_gnmax_private_knn = 28.0
selection_mode = "random"
# selection_mode = "entropy"
private_tau = 0
private_query_count = None
private_tau_norm = "2"
num_teachers_private_knn = 300
# For the release of the confidence values.
# threshold_confidence = 200
# sigma_threshold_confidence = 150
# sigma_gnmax_confidence = 40.0
# bins_confidence = 10
sigma_gnmax_confidence = None
bins_confidence = None
if dataset == "mnist":
momentum = 0.5
lr = 0.1
weight_decay = 1e-4
batch_size = 64
eval_batch_size = 1000
end_id = 1
num_epochs = 20
num_models = 250
# num_models = 250
# num_models = 1000
num_querying_parties = 1
selection_mode = "random"
# selection_mode = 'gap'
# selection_mode = 'entropy'
# selection_mode = 'deepfool'
# selection_mode = 'greedy'
# threshold = 300
# sigma_threshold = 200
# Scalable PATE
threshold = 200
sigma_threshold = 150
sigma_gnmax = 40.0
sigma_gnmax_private_knn = 28.0
# For releasing the confidence scores.
bins_confidence = 10
sigma_gnmax_confidence = 40.0
# We release the confidence scores for all the answered queries.
# threshold = 0
# sigma_threshold = 0
# sigma_gnmax = 40.0
# sigma_gnmax = 35.0
# sigma_gnmax = 28.0
# sigma_gnmax = 0.0
# sigma_gnmax = 28.0
# num_teachers_private_knn = 300
# Total privacy budget for releasing both the answers to the queries as
# well as the confidence scores for each query.
budget = 2.5
# budget = 10.0
# budget = float('inf')
# budgets = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 4.5]
budgets = [budget]
architecture = "MnistNetPate"
# weak_classes = '1,2'
weak_classes = ""
class_type = "multiclass"
# class_type = 'multiclass_confidence'
dataset_type = "balanced"
balance_type = "standard"
vote_type = "probability"
# force the model size for mnist
default_model_size = model_size.big
# default_model_size = model_size.small
num_workers = 4
# num_workers = 0
device_ids = [0]
elif dataset == "fashion-mnist":
optimizer = "Adam"
# optimizer = 'SGD'
if optimizer == "Adam":
lr = 0.001
elif optimizer == "SGD":
lr = 0.01
momentum = 0.5
weight_decay = 1e-4
batch_size = 64
eval_batch_size = 1000
end_id = 1
num_epochs = 100
num_models = 250
threshold = 200.0
sigma_gnmax = 40.0
sigma_threshold = 150.0
budget = 2.5
num_teachers_private_knn = 300
# budget = 6.0
# budget = float('inf')
# budgets = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 4.5]
budgets = [budget]
architecture = "FashionMnistNet"
weak_classes = ""
class_type = "multiclass"
# class_type = 'multiclass_confidence'
dataset_type = "balanced"
# balance_type = 'perfect'
balance_type = "standard"
log_every_epoch = 0
bins_confidence = 10
sigma_gnmax_confidence = 40.0
default_model_size = model_size.big
# default_model_size = model_size.small
num_workers = 6
# num_workers = 0
device_ids = [0, 1, 2]
selection_mode = "random"
elif dataset == "cifar10":
lr = 0.01
weight_decay = 1e-5
batch_size = 128
eval_batch_size = batch_size
end_id = 50
num_epochs = 500
num_models = 50
threshold = 50.0
sigma_gnmax = 7.0
sigma_gnmax_private_knn = 28.0
# num_teachers_private_knn = 800
# sigma_gnmax_private_knn = 100
sigma_threshold = 30.0
budget = 20.0
budgets = [budget]
architecture = "ResNet12"
# architecture = 'tresnet_m'
# weak_classes = '7,8,9'
weak_classes = ""
class_type = "multiclass"
# class_type = 'multiclass_confidence'
selection_mode = "random"
# selection_mode = 'gap'
# selection_mode = 'entropy'
# selection_mode = 'deepfool'
# selection_mode = 'greedy'
bins_confidence = 10
sigma_gnmax_confidence = 7.0
default_model_size = model_size.big
# default_model_size = model_size.small
num_workers = 6
# num_workers = 0
device_ids = [0, 1, 2]
elif dataset == "cifar100":
lr = 0.01
weight_decay = 1e-4
batch_size = 128
eval_batch_size = batch_size
end_id = 1
num_epochs = 500
num_models = 50
threshold = 50.0
sigma_gnmax = 7.0
sigma_threshold = 30.0
budget = 20.0
budgets = [budget]
num_teachers_private_knn = 300
# sigma_gnmax_private_knn = 85
architecture = "VGG5"
weak_classes = ""
class_type = "mutliclass"
dataset_type = "balanced"
balance_type = "perfect"
elif dataset == "svhn":
lr = 0.1
weight_decay = 1e-4
batch_size = 128
eval_batch_size = batch_size
end_id = 1
num_epochs = 200
num_models = 250
# threshold = 300.
# sigma_threshold = 200.0
# sigma_gnmax = 40.
threshold = 0
sigma_threshold = 0
sigma_gnmax = 35.0
sigma_gnmax_private_knn = 100
# budget = 2.0
budget = 3.0
# budget = 10.0
# budget = 6.0
# budget = float('inf')
# budgets = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 4.5]
budgets = [budget]
num_teachers_private_knn = 300
# sigma_gnmax_private_knn = 85
architecture = "VGG7"
# architecture = 'ResNet6'
if architecture.startswith("ResNet"):
lr = 0.01
weight_decay = 1e-5
num_epochs = 300
# weak_classes = '1,2'
weak_classes = ""
class_type = "multiclass"
elif dataset == "chexpert":
optimizer = "Adam"
lr = 0.0001
weight_decay = 0.0001
batch_size = 32
eval_batch_size = 32
end_id = 1
num_models = 100
num_epochs = 300
budget = 20.0
# budget = 6.0
# budget = float('inf')
# budgets = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 4.5]
budgets = [budget]
# architecture = 'chexpert-vgg11'
architecture = "densenet121"
weak_classes = ""
if debug:
num_workers = 0
else:
num_workers = 8
chexpert_dataset_type = "pos"
if chexpert_dataset_type in ["multi"]:
class_type = "multilabel"
elif chexpert_dataset_type in ["pos", "single"]:
class_type = "multiclass"
else:
raise Exception(
f"Unknown chexpert_dataset_type: {chexpert_dataset_type}.")
elif dataset == "retinopathy":
optimizer = "SGD" # adam
lr = 3e-3 # 0.001
end_id = 1
weight_decay = 5e-4 # 1e-5
momentum = 0.9
batch_size = 64
eval_batch_size = 64
num_epochs = 200
num_models = 50 # 63 (because divisible)
threshold = 50.0
sigma_gnmax = 7.0
sigma_threshold = 30.0
budget = 20.0
budgets = [budget]
architecture = (
"RetinoNet"
# can also use resnet50 or vgg16 (both are used on kaggle)
)
if architecture == "RetinoNet":
loss_type = "MSE"
if architecture.startswith("ResNet"):
lr = 0.01
weight_decay = 1e-5
num_epochs = 300
weak_classes = "0"
scheduler_type = "MultiStepLR"
scheduler_milestones = [150, 220]
class_type = "multiclass"
elif dataset == "celeba":
use_tau: bool = False
querying_party_ids = [-1]
num_querying_parties = -1
pick_labels = [x for x in range(20)]
if use_tau:
# class_type = 'multilabel_tau'
# class_type = 'multilabel_tau_dep'
class_type = "multilabel_tau_pate"
if class_type in ["multilabel_tau", "multilabel_tau_pate"]:
# private_tau_norm = '1'
private_tau_norm = "2"
if private_tau_norm == "1":
private_tau = 10.0
threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 12.0]
elif private_tau_norm == "2":
private_tau = np.sqrt(10)
threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 20.0]
else:
raise Exception(
f"Unsupported private tau norm: {private_tau_norm}")
# private_query_count = 133
private_query_count = None
budget = 0.0
elif class_type == "multilabel_tau_dep":
# private_tau = np.sqrt(40)
# private_tau = np.sqrt(20)
private_tau = np.sqrt(10)
# private_tau = 3.0
# private_tau = np.sqrt(1)
private_query_count = None
# threshold, sigma_threshold, sigma_gnmax = [0.01, 0.01, 10.0]
threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 25.0]
# threshold, sigma_threshold, sigma_gnmax = [0.01, 0.01, 35.0]
# threshold, sigma_threshold, sigma_gnmax = [50., 30., 22.]
# threshold, sigma_threshold, sigma_gnmax = [40., 20., 30.]
budget = 1000.0
else:
raise Exception(f"Unknown class_type: {class_type}.")
else:
# class_type = "multilabel"
class_type = 'multilabel_powerset'
# class_type = 'multilabel_pate'
private_tau = None
private_query_count = None
# threshold, sigma_threshold, sigma_gnmax = [0.01, 0.01, 35.0]
# threshold, sigma_threshold, sigma_gnmax = [50, 30, 13]
# threshold, sigma_threshold, sigma_gnmax = [50., 30., 22.]
# threshold, sigma_threshold, sigma_gnmax = [40., 20., 30.]
threshold, sigma_threshold, sigma_gnmax = [0, 0, 1]
budget = 20.0
# budget = 218.73
taskweights = False
optimizer = "SGD"
lr = 0.001
weight_decay = 0.00001
momentum = 0.9
batch_size = 64
eval_batch_size = 64
num_models = 50
# num_models = 1
begin_id = 0
# end_id = num_models
end_id = 1
num_epochs = 100
# budget = 2.0
# budget = 6.0
# budget = 10.0
# budget = float('inf')
# budgets = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 4.5]
budgets = [budget]
architecture = "CelebaNet"
loss_type = "BCEWithLogits"
weak_classes = ""
if debug:
num_workers = 0
else:
num_workers = 8
# class_type = 'multilabel_tau'
# class_type = 'multilabel_counting'
log_every_epoch = 1
# threshold, sigma_threshold, sigma_gnmax = [0.01, 0.01, 0.]
# threshold, sigma_threshold, sigma_gnmax = [50., 30., 22.]
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 0.0]
# multilabel_prob_threshold = 0.5
multilabel_prob_threshold = [
0.08080808080808081,
0.26262626262626265,
0.5555555555555556,
0.16161616161616163,
0.030303030303030304,
0.10101010101010102,
0.24242424242424243,
0.21212121212121213,
0.21212121212121213,
0.18181818181818182,
0.04040404040404041,
0.20202020202020204,
0.15151515151515152,
0.05050505050505051,
0.04040404040404041,
0.05050505050505051,
0.08080808080808081,
0.06060606060606061,
0.5151515151515152,
0.4747474747474748,
0.393939393939394,
0.5151515151515152,
0.030303030303030304,
0.12121212121212122,
0.8282828282828284,
0.30303030303030304,
0.05050505050505051,
0.23232323232323235,
0.05050505050505051,
0.11111111111111112,
0.04040404040404041,
0.5353535353535354,
0.20202020202020204,
0.2828282828282829,
0.18181818181818182,
0.10101010101010102,
0.3434343434343435,
0.08080808080808081,
0.07070707070707072,
0.8181818181818182,
]
labels_order = [
21,
37,
32,
19,
6,
20,
10,
25,
22,
36,
16,
18,
9,
3,
39,
17,
40,
31,
35,
30,
34,
13,
2,
1,
12,
8,
5,
14,
4,
23,
29,
15,
26,
28,
33,
7,
38,
24,
27,
11,
]
elif dataset == "pascal":
# querying_party_ids = [0, 1, 2]
querying_party_ids = [-1]
num_querying_parties = -1
optimizer = "SGD"
lr = 0.001
weight_decay = 0.0001
momentum = 0.9
batch_size = 32
eval_batch_size = 64
num_models = 50
# num_models = 1
begin_id = 0
end_id = num_models
# pick_labels = -1
# end_id = 1
num_epochs = 500
# budget = 2.0
# budget = 6.0
budget = 100.0
# budget = 20.0
# budget = float('inf')
# budgets = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 4.5]
budgets = [budget]
architecture = "resnet50"
loss_type = "MultiLabelSoftMarginLoss"
weak_classes = ""
if debug:
num_workers = 0
else:
num_workers = 8
# commands = ['evaluate_big_ensemble_model']
# commands = ['query_ensemble_model']
class_type = 'multilabel'
# class_type = 'multilabel_powerset'
threshold, sigma_threshold, sigma_gnmax = [0, 0, 1]
# threshold, sigma_threshold, sigma_gnmax = [0, 0, 7]
pick_labels = [x for x in range(10)]
# pick_labels = [x for x in range(16)]
# pick_labels = [x for x in range(5)]
# class_type = 'multilabel_counting'
# class_type = 'multilabel_counting'
# class_type = "multilabel_tau_pate"
# class_type = 'multilabel_tau_data_independent'
# private_tau_norm = '2'
# private_tau_norm = '1'
# private_tau_norm = "2"
private_tau_norm = None
if private_tau_norm == "2":
private_tau = 1.8
elif private_tau_norm == "1":
private_tau = 3.4
log_every_epoch = 1
# threshold, sigma_threshold, sigma_gnmax = [0.01, 0.01, 22.]
# threshold, sigma_threshold, sigma_gnmax = [50, 30, 9]
# threshold, sigma_threshold, sigma_gnmax = [50, 30, 9]
# threshold, sigma_threshold, sigma_gnmax = [50., 30., 22.]
multilabel_prob_threshold = [0.5]
elif dataset == "coco":
setting = 1
if setting == 1:
optimizer = "SGD"
momentum = 0.9
scheduler_type = "ReduceLROnPlateau"
lr = 0.01
# weight_decay = 0.00001
weight_decay = 0.0
loss_type = "BCEWithLogits"
elif setting == 2:
optimizer = "Adam"
scheduler_type = "OneCycleLR"
lr = 0.0002
weight_decay = 0.00001
loss_type = "AsymmetricLossOptimized"
else:
raise Exception(f"Unknown setting: {setting}.")
architecture = "tresnet_m"
batch_size = 128
eval_batch_size = 128
end_id = 1
num_models = 100
# num_models = 50
# num_models = 5
num_epochs = 100
# budget = 2.0
# budget = 6.0
budget = 20.0
# budget = float('inf')
# budgets = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 4.5]
budgets = [budget]
weak_classes = ""
if debug:
num_workers = 0
else:
num_workers = 8
class_type = "multilabel"
log_every_epoch = 1
# threshold, sigma_threshold, sigma_gnmax = [50., 30., 7.]
threshold, sigma_threshold, sigma_gnmax = [1.0, 1.0, 7.0]
multilabel_prob_threshold = 0.8
elif dataset in xray_datasets:
architecture = f"densenet121_{dataset}"
# architecture = f'densenet121_mimic'
# architecture = f'densenet121_padchest'
# architecture = f'densenet121_cxpert'
num_querying_parties = -1
querying_party_ids = [-1]
# num_querying_parties = 3
# querying_party_ids = [0, 1, 2] # we increment it later on to be from 1
taskweights = True
adam_amsgrad = True
xray_views = ["AP", "PA"]
optimizer = "Adam"
scheduler_type = "ReduceLROnPlateau"
lr = 0.001
# weight_decay = 1e-6
weight_decay = 1e-5
# weight_decay = 1e-4
momentum = 0.9
# batch_size = 256
batch_size = 64
eval_batch_size = batch_size
# num_models = 50
# num_models = 1
# num_models = 10
if dataset == "cxpert":
# num_models = 1
num_models = 50
elif dataset == "padchest":
num_models = 20
# num_models = 10
# num_models = 50
begin_id = 0
end_id = num_models
# end_id = 50
# num_models = 1
# num_models = 5
num_epochs = 100
# budget = 2.0
# budget = 6.0
budget = 20.0
# budget = 8.0
# budget = float('inf')
# budgets = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 4.5]
budgets = [budget]
loss_type = "BCEWithLogits"
weak_classes = ""
if debug:
num_workers = 0
else:
num_workers = 8
log_every_epoch = 1
if dataset == "padchest":
# tau probability threshold per label
if num_models in [1, 10, 20, 50]:
# multilabel_prob_threshold = 0.04
multilabel_prob_threshold = [
0.05,
0.01,
0.15,
0.01,
0.01,
0.01,
np.nan,
0.04,
0.09,
0.04,
0.06,
np.nan,
np.nan,
0.02,
0.01,
0.02,
0.01,
0.01,
]
else:
raise Exception(f"Unsupported number of models: {num_models}.")
elif dataset == "cxpert":
# tau probability threshold per label
# multilabel_prob_threshold = [0.5]
multilabel_prob_threshold = [
0.53,
0.5,
0.18,
0.56,
0.56,
np.nan,
0.21,
np.nan,
0.23,
np.nan,
np.nan,
0.46,
0.7,
np.nan,
np.nan,
np.nan,
0.2,
0.32,
]
else:
multilabel_prob_threshold = [0.5]
# Pick labels from CheXpert test set for PadChest models
# Test PadChest models on the CheXpert data.
# pick_labels = [0, 1, 2, 3, 4, 8, 16, 17]
# pick_labels = get_chexpert_intersect_padchest_idexes()
# pick_labels = get_padchest_intersect_chexpert_indexes()
# pick_labels = get_chexpert_indexes()
# pick_labels = None # Original value
# pick_labels = [0, 1, 2, 3, 4] # for cxpert
# pick_labels = [0, 1, 2, 3, 4, 6, 8, 11, 12, 16, 17]
pick_labels = [0, 1, 2, 3, 4, 6, 8, 11, 12, 16, 17]
pick_labels = pick_labels[:6]
# commands = ['evaluate_big_ensemble_model']
commands = ['query_ensemble_model']
class_type = 'multilabel'
# class_type = "multilabel_powerset"
threshold, sigma_threshold, sigma_gnmax = [0, 0, 7]
# class_type = 'multilabel_counting'
# class_type = 'multilabel_counting_gaussian'
# class_type = 'multilabel_tau'
# class_type = "multilabel_tau_pate"
if (
(pick_labels == [0, 1, 2, 3, 4])
and (num_models == 50)
and (dataset == "padchest")
and (architecture == "densenet121_cxpert")
):
threshold, sigma_threshold, sigma_gnmax = [50, 30, 9]
if dataset == "padchest" and num_models == 10:
if pick_labels is None:
# threshold, sigma_threshold, sigma_gnmax = [0.01, 0.01, 18.]
threshold, sigma_threshold, sigma_gnmax = [50, 30, 7.0]
elif pick_labels == [1, 4]:
threshold, sigma_threshold, sigma_gnmax = [0.01, 0.01, 7.0]
elif pick_labels == [9]:
threshold, sigma_threshold, sigma_gnmax = [0.01, 0.01, 5.0]
elif pick_labels == [0, 1, 2, 3, 4]:
threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 9.0]
else:
# threshold, sigma_threshold, sigma_gnmax = [0, 0, 0]
# threshold, sigma_threshold, sigma_gnmax = [50, 30, 7]
threshold, sigma_threshold, sigma_gnmax = [0, 0, 7]
# threshold, sigma_threshold, sigma_gnmax = [0, 0, 3]
if class_type in ["multilabel_tau", "multilabel_tau_pate"]:
# private_tau_norm = '1'
private_tau_norm = "2"
if private_tau_norm == "1":
private_tau = 8
threshold, sigma_threshold, sigma_gnmax = [0, 0, 12]
else:
private_tau = np.sqrt(8)
threshold, sigma_threshold, sigma_gnmax = [0, 0, 16]
else:
pass
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 11]
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 20]
# threshold, sigma_threshold, sigma_gnmax = [0, 0, 0]
# threshold, sigma_threshold, sigma_gnmax = [50, 30, 7]
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 14.0]
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 7.0]
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 6.0]
# powerset all labels cxpert
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 1.1]
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 21]
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 7.0]
# threshold, sigma_threshold, sigma_gnmax = [0.0, 0.0, 20.0]
else:
raise Exception("Unknown dataset: {}".format(dataset))
if debug is True:
debug = "True"
else:
debug = "False"
parser = argparse.ArgumentParser(
description="Confidential And Private Collaborative Learning"
)
# Command parameters (what to run).
parser.add_argument(
"--commands",
nargs="+",
type=str,
# default=['train_private_models'],
# default=['query_ensemble_model', 'retrain_private_models'],
default=commands,
help="which commands to run",
)
parser.add_argument("--timestamp", type=str, default=timestamp,
help="timestamp")
parser.add_argument(
"--path",
type=str,
default=f"/home/nicolas/code/capc-learning-ahmad2",
help="path to the project",
)
parser.add_argument(
"--data_dir", type=str, default=f"/home/nicolas/data",
help="path to the data"
)
# General parameters
parser.add_argument(
"--dataset", type=str, default=dataset, help="name of the dataset"
)
parser.add_argument(
"--architecture", type=str, default="MnistNetPate", help="model architecture"
)
parser.add_argument(
"--class_type",
type=str,
# the below naming convention is from scikit-learn
# default='binary',
# default='multiclass',
# default='multilabel',
default=class_type,
help="The type of the classification: binary, multiclass with a "
"single class per data item, and multilabel classification with "
"zero or more possible classes assigned to a data item.",
)
parser.add_argument(
"--dataset_type",
type=str,
default=dataset_type,
# default='balanced',
# default='imbalanced',
help="Type of the dataset.",
)
parser.add_argument(
"--balance_type",
type=str,
# default='perfect', # distribute the classes to subsets evenly.
# default='standard', # divide a dataset into subset arbitrarily.
default=balance_type,
help="Type of the balance of classes in the dataset.",
)
parser.add_argument(
"--begin_id",
type=int,
default=begin_id,
help="train private models with id number in [begin_id, end_id)",
)
parser.add_argument(
"--end_id",
type=int,
default=end_id,
help="train private models with id number in [begin_id, end_id)",
)
parser.add_argument(
"--num_querying_parties",
type=int,
default=num_querying_parties,
help="number of parties that pose queries",
)
parser.add_argument(
"--querying_party_ids",
type=int,
nargs="+",
default=querying_party_ids,
help="the id of the querying party",
)
parser.add_argument(
"--mode",
type=str,
# default='random',
# default='entropy',
# default='gap',
# default='greedy',
# default='deepfool',
default=selection_mode,
help="method for generating utility scores",
)
parser.add_argument(
"--weak_classes", type=str, default=weak_classes,
help="indices of weak classes"
)
parser.add_argument(
"--weak_class_ratio",
type=float,
default=0.1,
help="ratio of samples belonging to weak classes",
)
parser.add_argument(
"--verbose",
default="True",
# default=False,
type=str,
choices=bool_choices,
help="Detail info",
)
bool_params.append("verbose")
parser.add_argument(
"--debug",
default=debug,
# default=False,
type=str,
choices=bool_choices,
help="Debug mode of execution",
)
bool_params.append("debug")
parser.add_argument(
"--use_pretrained_models",
default="False",
# default="True",
type=str,
choices=bool_choices,
help="Pretrained weights for the initial training of models on private "
"data",
)
bool_params.append("use_pretrained_models")
parser.add_argument(
"--retrain_fine_tune",
default="False",
# default=False,
type=str,
choices=bool_choices,
help="Pretrained weights for retraining models",
)
bool_params.append("retrain_fine_tune")
parser.add_argument(
"--sep", default=";", type=str, help="Separator for the output log."
)
parser.add_argument(
"--log_every_epoch",
default=log_every_epoch,
type=int,
help="Log test accuracy every n epchos.",
)
parser.add_argument(
"--test_virtual",
default=False,
action="store_true",
help="False for normal, True to train a larger qa model",
)
# Training parameters
parser.add_argument(
"--optimizer",
type=str,
default=optimizer,
# default='SGD',
help="The type of the optimizer.",
)
parser.add_argument(
"--adam_amsgrad",
type=bool,
default=adam_amsgrad,
help="amsgrad param for Adam optimizer",
)
parser.add_argument(
"--loss_type",
type=str,
default=loss_type,
# default='CE',
help="The type of the loss (e.g., MSE, CE, BCE, etc.).",
)
parser.add_argument(
"--batch_size", type=int, default=batch_size,
help="batch size for training"
)
parser.add_argument(
"--eval_batch_size",
type=int,
default=eval_batch_size,
help="batch size for evaluation",
)
parser.add_argument(
"--adaptive_batch_size",
type=int,
default=5,
help="batch size for adaptive training",
)
parser.add_argument(
"--patience", type=int, default=None,
help="patience for adaptive training"
)
parser.add_argument(
"--target_model",
type=str,
default="victim",
help="steal 1 model (victim) or pate model (pate) or a "
"different pate (another_pate)",
)
parser.add_argument(
"--shuffle_dataset",
action="store_true",
default=False,
help="shuffle dataset before split to train private "
"models. only implemented for mnist",
)
parser.add_argument(
"--num_optimization_loop",
type=int,
default=20,
help="num_optimization_loop for adaptive training with bayesian "
"optimization",
)
parser.add_argument(
"--num_classes",
type=int,
default=10,
)
parser.add_argument("--momentum", type=float, default=momentum,
help="SGD momentum")
parser.add_argument(
"--weight_decay",
type=float,
default=weight_decay,
help="L2 weight decay factor",
)
parser.add_argument("--seed", type=int, default=111, help="random seed")
parser.add_argument("--lr", type=float, default=lr,
help="initial learning rate")
parser.add_argument(
"--lr_factor", type=float, default=0.1,
help="learning rate decay factor"
)
parser.add_argument(
"--lr_epochs",
type=int,
nargs="+",
default=[2],
help="Epoch when learning rate decay occurs.",
)
parser.add_argument(
"--num_epochs",
type=int,
default=num_epochs,
help="number of epochs for training",
)
parser.add_argument(
"--attacker_dataset",
default=None,
# default='svhn',
# default='fashion-mnist',
# default='mnist',
type=str,
help="dataset used by model extraction attack, default to be the same "
"as dataset",
)
parser.add_argument(
"--architectures",
nargs="+",
type=str,
# default=['VGG16', 'VGG19', 'VGG5', 'VGG13', 'VGG11'],
# default=['ResNet8', 'ResNet10'],
# default=['VGG'],
default=[architecture],
help="The architectures of heterogeneous models.",
)
parser.add_argument(
"--model_size",
type=model_size,
choices=list(model_size),
default=default_model_size,
help="The size of the model.",
)
parser.add_argument(
"--device_ids",
nargs="+",
type=int,
default=device_ids,
# default=[0, 1, 2, 3],
# default=[0],
help="Cuda visible devices.",
)
parser.add_argument(
"--scheduler_type",
type=str,
default=scheduler_type,
# default='ReduceLROnPlateau',
# default='MultiStepLR',
help="Type of the scheduler.",
)
parser.add_argument(
"--scheduler_milestones",
nargs="+",
type=int,
default=scheduler_milestones,
help="The milestones for the multi-step scheduler.",
)
parser.add_argument(
"--schedule_factor", type=float, default=0.1,
help="The factor for scheduler."
)
parser.add_argument(
"--schedule_patience", type=int, default=10,
help="The patience for scheduler."
)
parser.add_argument(
"--num_workers",
type=int,
default=num_workers,
help="Number of workers to fetch data.",
)
# Privacy parameters
parser.add_argument(
"--num_models", type=int, default=num_models,
help="number of private models"
)
# Standard PATE mechanism
parser.add_argument(
"--budget",
type=float,
default=budget,
help="pre-defined epsilon value for (eps, delta)-DP",
)
parser.add_argument(
"--budgets",
nargs="+",
type=float,
default=budgets,
help="pre-defined epsilon value for (eps, delta)-DP",
)
parser.add_argument(
"--threshold",
type=float,
default=threshold,
help="threshold value (a scalar) in the threshold mechanism",
)
# Confident GNMAX.
parser.add_argument(
"--sigma_gnmax",
type=float,
default=sigma_gnmax,
help="std of the Gaussian noise in the GNMax mechanism",
)
parser.add_argument(
"--sigma_gnmax_private_knn",
type=float,
default=sigma_gnmax_private_knn,
help="std of the Gaussian noise in the GNMax mechanism used for the pknn cost",
)
parser.add_argument(
"--sigma_threshold",
type=float,
default=sigma_threshold,
help="std of the Gaussian noise in the threshold mechanism",
)
# For releasing the confidence scores.
parser.add_argument(
"--sigma_gnmax_confidence",
type=float,
default=sigma_gnmax_confidence,
help="std of the Gaussian noise in the GNMax mechanism for releasing "
"the confidence scores",
)
parser.add_argument(
"--delta",
type=float,
default=1e-5,
)
parser.add_argument(
"--bins_confidence",
type=int,
default=bins_confidence,
help="Number of confidence bins. We discretize the softmax vector by "
"creating a histogram and mapping each element to the midpoint of "
"the bin it belongs to.",
)
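    # A minimal sketch (illustrative only, not executed by the CLI) of the
    # binning described above, assuming numpy softmax probabilities in [0, 1]:
    #   edges = np.linspace(0.0, 1.0, bins_confidence + 1)
    #   idx = np.clip(np.digitize(probs, edges) - 1, 0, bins_confidence - 1)
    #   discretized = (edges[idx] + edges[idx + 1]) / 2.0  # bin midpoints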
# For tau-approximation.
parser.add_argument(
"--private_taus",
nargs="+",
type=float,
default=[private_tau],
help="The value of tau for the tau-approximation where we limit the "
"sensitivity of a given teacher by limiting the positive votes to"
"tau (in an L-norm).",
)
parser.add_argument(
"--private_tau_norm",
type=str,
# default='1',
# default='2',
default=private_tau_norm,
help="The norm for the tau-approximation.",
)
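    # Illustrative sketch (an assumption, not necessarily the exact code used
    # elsewhere in the project) of clipping a teacher's vote vector v to tau:
    #   v = v * min(1.0, private_tau / np.linalg.norm(v, ord=int(private_tau_norm)))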
parser.add_argument(
"--private_query_count",
type=int,
default=private_query_count,
help="The number of queries to be answered privately. This is for the "
"data independent privacy analysis_test with tau approximation.",
)
parser.add_argument(
"--poisson_mechanism",
default="False",
type=str,
choices=bool_choices,
help="Apply or disable the poisson mechanism.",
)
bool_params.append("poisson_mechanism")
# Parameters for the coco dataset.
parser.add_argument(
"--multilabel_prob_threshold",
default=multilabel_prob_threshold,
type=float,
nargs="+",
help="threshold value",
)
parser.add_argument(
"--coco_version", default="2017", type=str,
help="the year of the dataset"
)
parser.add_argument(
"--coco_image_size",
default=448,
type=int,
help="input image size (default: 448)",
)
parser.add_argument(
"--coco_data_loader",
type=str,
help="standard or custom data loader, where custom uses"
"the pre-generated labels",
default="custom",
)
parser.add_argument(
"--coco_datasets",
nargs="+",
type=str,
default=["train", "val"],
# default=['train', 'val', 'test', 'unlabeled'],
help="Which datasets for original coco to load into the total data pool.",
)
parser.add_argument(
"--coco_additional_datasets",
nargs="+",
type=str,
# default=['test', 'unlabeled'],
default=[],
help="Which datasets for original coco to load into the total data pool.",
)
# add args for chexpert
parser = get_chexpert_paremeters(parser=parser, timestamp=timestamp)
# cxpert dataset - chexpert version from https://arxiv.org/pdf/2002.02497.pdf
parser.add_argument("--data_aug", type=bool, default=True, help="")
parser.add_argument("--data_aug_rot", type=int, default=45, help="")
parser.add_argument("--data_aug_trans", type=float, default=0.15, help="")
parser.add_argument("--data_aug_scale", type=float, default=0.15, help="")
parser.add_argument(
"--taskweights",
default=taskweights,
type=bool,
help="Assign weight to tasks/labels based on their "
"number of nan (not a number) values.",
)
parser.add_argument("--label_concat", type=bool, default=False, help="")
parser.add_argument("--label_concat_reg", type=bool, default=False, help="")
parser.add_argument("--labelunion", type=bool, default=False, help="")
parser.add_argument("--featurereg", type=bool, default=False, help="")
parser.add_argument("--weightreg", type=bool, default=False, help="")
"""
The abbreviations PA and AP stand for posteroanterior and anteroposterior,
respectively. These describe the pathway of the x-rays through the patient
to the detector (or, in the old days, film). In a PA projection, the front
of the patient’s chest is against the detector and the x-rays pass through
    the back (posterior) of the patient, through the front (anterior) of the
chest and then strike the detector. This is the usual projection obtained
in an ambulatory patient. In a patient who cannot stand, a cassette
containing the detector can be placed behind the patient’s back
(while they’re lying or sitting up in a gurney or hospital bed,
    for example) and the exposure (often obtained with a portable x-ray unit)
obtained. In this scenario, the x-rays pass from the front of the patient’s
chest (anterior) through the back (posterior), then strike the detector,
yielding an AP view. From the point of view of image quality, a PA
projection is preferred for several reasons. For example, the portions of
the chest closest to the detector are the sharpest and least magnified on
the image. Since the heart sits in the anterior half of the chest in most
individuals, a more accurate representation of cardiac size and shape is
obtained on a PA view, compared to an AP view.
Usually, radiologists see PA and lateral views.
"""
parser.add_argument(
"--xray_views",
type=str,
default=xray_views,
nargs="+",
help="The type of the views for the chext x-ray: lateral, PA, or AP.",
)
parser.add_argument(
"--xray_datasets",
type=str,
default=xray_datasets,
nargs="+",
help="The names of the datasets with xray-s.",
)
parser.add_argument(
"--count_noise",
type=str,
default="bounded",
# default='gaussian',
help="The type of noise added in the multiple-counting query mechanism.",
)
parser.add_argument(
"--vote_type",
type=str,
# default = '',
# default='probability',
# default='discrete',
default=vote_type,
help="The type of votes. Discrete - each vote is a single number 0 or 1,"
"or probability - the probability of a label being one.",
)
parser.add_argument(
"--pick_labels",
type=int,
nargs="+",
# default=None,
# default=labels_order[:2],
# default=pick_labels,
# default=[-1],
# default=None,
default=pick_labels,
help="Which labels to limit the dataset to. Set to None to select all "
"labels.",
)
parser.add_argument(
"--query_set_type",
type=str,
default="raw",
# default='numpy',
help="The type of query set saved for the retraining when we query the"
"ensemble of the teacher models.",
)
parser.add_argument(
"--test_models_type",
type=str,
# default='retrained',
default="private",
help="The type of models to be tested.",
)
parser.add_argument(
"--retrain_model_type",
type=str,
default="load",
# default='raw',
help="Should we load the private model for retraining (load) or start"
"from scratch, i.e. from a raw model (raw).",
)
parser.add_argument(
"--transfer_type",
type=str,
# default='cross-domain',
default="",
help="The transfer of knowledge can be cross-domain, e.g., from the "
"chexpert ensemble to the padchest models.",
)
parser.add_argument(
"--sigmoid_op",
type=str,
default="apply",
# default='disable',
help="Apply or disable the sigmoid operation outside of model " "arhictecture.",
)
parser.add_argument(
"--label_reweight",
type=str,
# default='apply',
default="disable",
help="Apply or disable the label reweighting based on the balanced "
"accuracy found on the privately trained model.",
)
parser.add_argument(
"--load_taus",
type=str,
# default='apply',
default="disable",
help="Apply or disable loading the taus (probability thresholds for "
"each label) from the model checkpoint.",
)
parser.add_argument(
"--show_dp_budget",
type=str,
default="apply",
# default='disable',
help="Apply or disable showing the current privacy budget.",
)
parser.add_argument(
"--apply_data_independent_bound",
# default="False",
default="True",
type=str,
choices=bool_choices,
help="Disable it in case of the privacy estimate for " "model extraction.",
)
bool_params.append("apply_data_independent_bound")
parser.add_argument(
"--retrain_extracted_model",
default="True",
type=str,
choices=bool_choices,
help="Do we re-train the extracted / stolen model on the newly labeled data?",
)
bool_params.append("retrain_extracted_model")
parser.add_argument(
"--load_votes",
default="True",
# default="False",
type=str,
choices=bool_choices,
help="Do we re-load votes saved on disk?",
)
bool_params.append("load_votes")
# DPSGD
parser.add_argument('--DPSGD', type=str2bool,
default='False')
parser.add_argument('--DPSGD_EPOCHS', type=int, default=10)
parser.add_argument('--DPSGD_BATCH_SIZE', type=int, default=2)
parser.add_argument('--DPSGD_NOISE_MULTIPLIER', type=float, default=1.3)
parser.add_argument('--DPSGD_CCLIP', type=float, default=0)
parser.add_argument('--DPSGD_LR', type=float, default=0.001)
parser.add_argument('--DPSGD_PASCAL_PATH', type=str, required=False,
default='/VOC2012/')
parser.add_argument('--cuda', type=str, required=False,
default=True)
parser.add_argument("-f", "--f", "--fff", "--ip", "--stdin", "--control", "--hb", "--Session.signature_scheme", "--Session.key", "--shell", "--transport", "--iopub", help="a dummy argument to fool ipython", default="1")
args = parser.parse_args()
#args, unknown = parser.parse_known_args()
args.cwd = os.getcwd()
for param in bool_params:
transform_bool(args=args, param=param)
# os.environ["CUDA_VISIBLE_DEVICES"] = f'{args.device_ids}'
print_args(args=args)
set_model_size(args=args)
return args
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
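# Example usage: parser.add_argument('--DPSGD', type=str2bool) accepts
# yes/no, true/false, t/f, y/n and 1/0 (case-insensitive) on the command line.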
def transform_bool(args, param: str):
"""
Transform the string boolean params to python bool values.
:param args: program args
:param param: name of the boolean param
"""
attr_value = getattr(args, param, None)
if attr_value is None:
raise Exception(f"Unknown param in args: {param}")
if attr_value == "True":
setattr(args, param, True)
elif attr_value == "False":
setattr(args, param, False)
else:
raise Exception(f"Unknown value for the args.{param}: {attr_value}.")
def get_chexpert_paremeters(parser: ArgumentParser, timestamp: str):
"""
CheXpert parameters.
:param parser: args parser
:param timestamp: the global timestamp
:return: parser with parameters for the CheXpert dataset.
"""
parser.add_argument(
"--save_path",
default=f"./save-{timestamp}",
metavar="SAVE_PATH",
type=str,
help="Path to the saved models",
)
parser.add_argument(
"--pre_train",
default=None,
type=str,
help="If get parameters from pretrained model",
)
parser.add_argument(
"--resume", default=0, type=int, help="If resume from previous run"
)
parser.add_argument(
"--logtofile",
default=True,
type=bool,
help="Save log in save_path/log.txt if set True",
)
parser.add_argument(
"--chexpert_dataset_type",
# classify for each sample if there is a disease
# (positive, pos) or there is no disease (negative, neg)
default="pos",
# default='single', # binary classification for a single disease
# default = 'multilabel', # multilabel, classify which diseases are present
type=str,
help="If get parameters from pretrained model",
)
parser.add_argument(
"--nan",
help="not a number or N/A values",
# type=int, default=-1,
type=float,
default=np.nan,
)
return parser
def print_args(args, get_str=False):
if "delimiter" in args:
delimiter = args.delimiter
elif "sep" in args:
delimiter = args.sep
else:
delimiter = ";"
print("###################################################################")
print("args: ")
keys = sorted(
[
a
for a in dir(args)
if not (
a.startswith("__")
or a.startswith("_")
or a == "sep"
or a == "delimiter"
)
]
)
values = [getattr(args, key) for key in keys]
if get_str:
keys_str = delimiter.join([str(a) for a in keys])
values_str = delimiter.join([str(a) for a in values])
print(keys_str)
print(values_str)
return keys_str, values_str
else:
for key, value in zip(keys, values):
print(key, ": ", value, flush=True)
print("ARGS FINISHED", flush=True)
print("######################################################")
|
[
"noreply@github.com"
] |
Songpei-Lu.noreply@github.com
|
a3ab64812e3683fca07ee72374edefe3d68d5c7b
|
84aa6f90e5cf5f2e49a9488c1768f2794cbd50db
|
/student/101022137/hw2/mandelbrot.py
|
ee9196ef219d8f3d0e1293e3398c04bc88ad9b2c
|
[] |
no_license
|
u101022119/NTHU10220PHYS290000
|
c927bf480df468d7d113e00d764089600b30e69f
|
9e0b5d86117666d04e14f29a253f0aeede4a4dbb
|
refs/heads/master
| 2021-01-16T22:07:41.396855
| 2014-06-22T11:43:36
| 2014-06-22T11:43:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
from cmath import *
from pylab import *
import sys
sys.setrecursionlimit(1500)
# iterate z_i = z_{i-1}**2 + c for up to n steps; the point escapes the
# Mandelbrot set once |z| > 2, in which case the value is capped at 2
def myz(c,n):
    z = {0:0.}
    for i in range(1,n+1):
        z[i] = z[i-1]**2 + c
        if abs(z[i]) > 2.0:
            return 2.
    return abs(z[i])
# map a plane point (x, y) to the complex number c = x + iy
def Mandelbrot (x,y,n):
    c = complex(x,y)
    return myz(c,n)
x = arange(-2,2,0.005)
y = arange(-2,2,0.005)
data =[[Mandelbrot(i,j,100) for i in x] for j in y]
imshow(data)
xlabel("real")
ylabel("image")
show()
|
[
"dick0914y@yahoo.com.tw"
] |
dick0914y@yahoo.com.tw
|
cbda68666975a4ac724d50adfd235d7df2d54cc2
|
6d653426a05c5647eb40748a1ed849491ed9e710
|
/zjazd_1/kolekcje/zad.4.py
|
2e49523b8002a99f4eff1f3fa205d3a8c0beb1f4
|
[] |
no_license
|
stanilevitch/python_06102018
|
fb908b513ad6a40a66cd02e638fddaf9d698120d
|
59a90e20c0c4037f7890e7e6a40e8fb17a3fd907
|
refs/heads/master
| 2020-03-31T02:51:48.259058
| 2018-12-02T15:44:49
| 2018-12-02T15:44:49
| 151,842,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
# prints the numbers from 0 to 100
# for i in range(101):
#     print(i)
podzielna = 0
# counts the numbers from 0 to 100 divisible by 3 or by 5
for i in range(101):
    if i % 3 == 0 or i % 5 == 0:
        # print(i)
        podzielna += 1
# print how many such numbers occurred in this range
print(podzielna)
print(f"In the range 0-100 there are {podzielna} numbers divisible by 3 or 5")
# rkorzen's approach
# print them out
lista = list(range(101))
wynik = []
for el in lista:
    if el % 3 == 0 or el % 5 == 0:
        wynik.append(el)
print(len(wynik))
|
[
"stanilevitch@gmail.com"
] |
stanilevitch@gmail.com
|
f06136992aafe7aff8e6bff8c86cbd4d74c7cbe1
|
5763d51d0b3ed804bd6a7003eb3b7d21ffb86f50
|
/python/zanki0.py
|
a536409e50c28811956d77056b1fd0409c773232
|
[] |
no_license
|
rafael950110/programs
|
42ae008c638c5037b15d1610ff61b5904f638c99
|
9f2992d2a487974b62f60526fbfb239aee279845
|
refs/heads/master
| 2020-05-20T17:53:22.787229
| 2019-07-08T10:12:01
| 2019-07-08T10:12:01
| 185,696,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
l = [0b0101010, 0b0110011, 0b1010111, 0b0011000, 0b1001000, 0b0010011, 0b0100001, 0b0001010]
n = [0 for i in range(len(l))]
|
[
"ichirosasaki@sf.cs.it-chiba.ac.jp"
] |
ichirosasaki@sf.cs.it-chiba.ac.jp
|
25fdd9ed11034f1c2c0cdf74d3b2e38b4afd11de
|
871775171633543c9f9d37fbc7ab022fec2c434e
|
/appium_two.py
|
6d46a30794afc398634f714502255f747bbd8097
|
[] |
no_license
|
ToddCombs/Phenomenon
|
886e9da9154c14a298887dd65fabe61db4c2f2ee
|
d5f656156e05eed7df19e51b22ee7567319eb59a
|
refs/heads/master
| 2020-07-04T14:50:40.482186
| 2020-01-09T02:36:31
| 2020-01-09T02:36:31
| 202,316,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
# author:ToddCombs
# appium example 2: open the Dapeng Education app
# note: do not also import selenium's webdriver here, it would shadow appium's
from appium import webdriver
import time
desired_caps = {}
desired_caps['platformName'] = 'Android'
# desired_caps['platformVersion'] = '6.0'
desired_caps['deviceName'] = '9HQ4C19909000574'
desired_caps['platformVersion'] = '8.1.0'
# desired_caps['deviceName'] = 'Y9CAAMGMD6S8PFAQ'
desired_caps['appPackage'] = 'com.power.dapengeducation'
desired_caps['appActivity'] = '.ui.login.TransitionActivity'
# appPackage name of Dapeng Education and the Activity of its launch screen
# desired_caps['appPackage'] = 'com.power.dapengeducation'
# desired_caps['appActivity'] = '.ui.login.TransitionActivity'
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
time.sleep(5)
def getSize():
x = driver.get_window_size()['width']
y = driver.get_window_size()['height']
return (x, y)
# swipe the screen upward
def swipeUp(t):
    l = getSize()
    x1 = int(l[0] * 0.5)  # x coordinate
    y1 = int(l[1] * 0.75)  # starting y coordinate
    y2 = int(l[1] * 0.25)  # ending y coordinate
    driver.swipe(x1, y1, x1, y2, t)
driver.find_element_by_id("美术")[0].click()
driver.quit()
|
[
"noreply@github.com"
] |
ToddCombs.noreply@github.com
|
4d150077efcfe7e49462570169130a9d2b4b2ce1
|
22537dcb756a3bbf7d88879b5330a3dfb8dc613e
|
/function_example.py
|
047038aa357b87fdd357448e1aee67483b14a27e
|
[] |
no_license
|
nnennandukwe/teaching_python
|
c50ce23bcbbb82652040192d37d792a974816842
|
9acd07b32c7a1fbebb7c334ee8bff819be1aae26
|
refs/heads/master
| 2021-01-25T10:01:00.124222
| 2018-02-28T19:57:51
| 2018-02-28T19:57:51
| 123,335,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
def hello(random_word,funny_word,mean_word):
return random_word, funny_word, mean_word
a = hello("school","hahaha","ugly")
b = hello("happy","silly","whatever")
print(a)
print(b)
def goodbye(name):
return "goodbye " + name
def add(first,second):
total = first / second
return "first number divided by second number: " + str(total)
print(add(4,5))
print(goodbye("Ashley"))
print(goodbye("Katie"))
|
[
"nnenna.s.ndukwe@gmail.com"
] |
nnenna.s.ndukwe@gmail.com
|
be6b29b48327bd6c403353bcc6be9d141e456122
|
e5a653f690e5d0de3fb5b675ab3f9824b5592a0d
|
/gitbro/cli.py
|
dc43bf704e667cda289a3c92011aa2e6fe4514a8
|
[
"MIT"
] |
permissive
|
destag/gitbro
|
c0114636c1c798817a58d24ff102e0ef64d471b1
|
9c2f56df9bda88fb657b371455b694046bd05c96
|
refs/heads/master
| 2020-04-08T17:24:57.936885
| 2018-11-28T22:02:04
| 2018-11-28T22:02:04
| 159,565,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
#!/usr/bin/env python3
"""Module for aggregating git repos."""
import sys
import logging
import argparse
import glob
import pathlib
import subprocess
import re
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'-p', '--pathname',
dest='pathname',
default='.',
        help='Pathname (root directory) to scan for git repos',
)
parser.add_argument(
'-f', '--fetch',
dest='fetch',
action='store_true',
        help='also run git fetch in each discovered repo',
)
args = parser.parse_args()
for repo_path in pathlib.Path(args.pathname).glob('**/.git'):
git_repo = repo_path.absolute()
print(git_repo.parts[-2])
        r = subprocess.run(['git', '--git-dir={repo}'.format(repo=git_repo), 'status', '-sb'], stdout=subprocess.PIPE).stdout.decode('utf-8')
        # typical first line: "## master...origin/master" (branch names may contain '/')
        stat_line = r.split('\n')[0]
        match = re.match(r'## (?P<localbranch>[\w\/]*)\.\.\.(?P<remotebranch>[\w\/]*)', stat_line)
        if match:  # a branch with no upstream has no "...remote" part
            print(match.group('localbranch') + '---' + match.group('remotebranch'))
if args.fetch:
r = subprocess.run(['git', '--git-dir={repo}'.format(repo=git_repo), 'fetch'], stdout=subprocess.PIPE)
# print(pathlib.Path(path).absolute().split())
if __name__ == '__main__':
main()
|
[
"przemyslawp94@gmail.com"
] |
przemyslawp94@gmail.com
|
b084f8cca901271134f986496362547203b961ef
|
e728a7b5447c4ca03ba799bec61459528f30fd88
|
/test/test_models/recipient.py
|
9841fad9857e0befdefa3468f52b10e92c1b3987
|
[] |
no_license
|
reritom/Esvi
|
deb75c0ca4b17494ed80adc3b735be008e0b3352
|
1e3242c61ec86af7b7479cd71f12a8677fcbde1f
|
refs/heads/master
| 2020-03-20T02:08:01.695643
| 2018-11-04T18:35:52
| 2018-11-04T18:35:52
| 137,100,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
from esvi import model
from esvi import fields
from test.test_models.contact import Contact
from test.test_models.message import Message
class Recipient(model.Model):
recipient_id = fields.StringField(primary=True)
contact = fields.ForeignKey(Contact)
message = fields.ForeignKey(Message)
|
[
"reikudjinn@gmail.com"
] |
reikudjinn@gmail.com
|
4566bf037da7d5ffbdbec7a6ab230d3fb82b0c19
|
83684f905274bea5ec0aa81e57abe6d6af5f65c6
|
/email_attachment_ept/py/email_attachment_ept.py
|
f201d5a7bc27a2d7b4307b5ee55d093db0039115
|
[] |
no_license
|
arpanv/pansuriya
|
f47064679aa2c98c1e9d3a6f0605c98a685e00cf
|
b8615b70c33b79d8b2454cef4151d3f83c3bc77f
|
refs/heads/master
| 2020-04-06T06:54:00.271098
| 2014-09-17T14:06:08
| 2014-09-17T14:06:08
| 24,102,473
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,806
|
py
|
import base64
import logging
from openerp import netsvc
from openerp.osv import osv, fields
from openerp import tools
from openerp.tools.translate import _
from urllib import urlencode, quote as quote
import time
class email_attachment_eept(osv.osv):
_inherit = "email.template"
_columns = {
"email_attachment_ids" : fields.many2many("ir.actions.report.xml","email_attachment","temp_id","rep_id","Attachment")
}
def generate_email(self, cr, uid, template_id, res_id, context=None):
"""Generates an email from the template for given (model, res_id) pair.
:param template_id: id of the template to render.
:param res_id: id of the record to use for rendering the template (model
is taken from template definition)
:returns: a dict containing all relevant fields for creating a new
mail.mail entry, with one extra key ``attachments``, in the
format expected by :py:meth:`mail_thread.message_post`.
"""
if context is None:
context = {}
report_xml_pool = self.pool.get('ir.actions.report.xml')
template = self.get_email_template(cr, uid, template_id, res_id, context)
values = {}
for field in ['subject', 'body_html', 'email_from',
'email_to', 'email_recipients', 'email_cc', 'reply_to']:
values[field] = self.render_template(cr, uid, getattr(template, field),
template.model, res_id, context=context) \
or False
if template.user_signature:
signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
values['body_html'] = tools.append_content_to_html(values['body_html'], signature)
if values['body_html']:
values['body'] = tools.html_sanitize(values['body_html'])
values.update(mail_server_id=template.mail_server_id.id or False,
auto_delete=template.auto_delete,
model=template.model,
res_id=res_id or False)
attachments = []
# Add report in attachments
if template.email_attachment_ids:
for email_attachment in template.email_attachment_ids:
report_name = email_attachment.name or '%s_report' %(template.model)
if email_attachment.attachment:
report_name = eval(email_attachment.attachment)
report_service = 'report.' + report_xml_pool.browse(cr, uid, email_attachment.id, context).report_name
# Ensure report is rendered using template's language
ctx = context.copy()
if template.lang:
ctx['lang'] = self.render_template(cr, uid, template.lang, template.model, res_id, context)
service = netsvc.LocalService(report_service)
(result, format) = service.create(cr, uid, [res_id], {'model': template.model}, ctx)
if result:
result = base64.b64encode(result)
if not report_name:
report_name = report_service
ext = "." + format
if not report_name.endswith(ext):
report_name += ext
attachments.append((report_name, result))
attachment_ids = []
# Add template attachments
for attach in template.attachment_ids:
attachment_ids.append(attach.id)
values['attachments'] = attachments
values['attachment_ids'] = attachment_ids
return values
|
[
"sohil@sohil.(none)"
] |
sohil@sohil.(none)
|
a98a6466c39492721ebce5de522c77874a2cdee4
|
bad0c63b82e44293272ab1452d077475f79e69a7
|
/Django/mysite/polls/urls.py
|
ea3f6f26eb2b7ec988a65380df7952ce96c0fa2c
|
[] |
no_license
|
NahianAlindo/dev_framework_practice
|
201102b65c0258941053428e51c42634c0c41cb5
|
9b956167ba0887530cb3492453d2d1f64f80ee11
|
refs/heads/main
| 2023-06-03T17:29:14.780197
| 2021-06-28T10:41:01
| 2021-06-28T10:41:01
| 332,162,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'), #pk means primary key
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
path("<int:question_id>/vote/", views.vote, name='vote'),
]
|
[
"nrifaat26@gmail.com"
] |
nrifaat26@gmail.com
|
647a162f8c8f6a8d43a163bbbba60c1fd13d27ce
|
3c50498feb6fb630bf9574653de7fcb04faf1bdb
|
/sd_card/Trinket_SDlogger/code.py
|
a357b497a19103ed800aebceaf5f2685ef238504
|
[
"MIT"
] |
permissive
|
DLR-School-Lab-TU-Dresden/weather_station_feather_m0_express
|
1c31daafbaf28dafd1176523394ebf82083e52dd
|
735b8dda3bd38c8462d66d35ed573c328ad9f9fa
|
refs/heads/master
| 2020-07-03T02:47:22.996131
| 2019-08-14T19:57:30
| 2019-08-14T19:57:30
| 201,760,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
import time
import adafruit_sdcard
import board
import busio
import digitalio
import microcontroller
import storage
# Use any pin that is not taken by SPI
SD_CS = board.D10
led = digitalio.DigitalInOut(board.D13)
led.direction = digitalio.Direction.OUTPUT
# Connect to the card and mount the filesystem.
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
cs = digitalio.DigitalInOut(SD_CS)
sdcard = adafruit_sdcard.SDCard(spi, cs)
vfs = storage.VfsFat(sdcard)
storage.mount(vfs, "/sd")
# Use the filesystem as normal! Our files are under /sd
print("Logging temperature to filesystem")
# append to the file!
while True:
# open file for append
with open("/sd/temperature.txt", "a") as f:
led.value = True # turn on LED to indicate we're writing to the file
t = microcontroller.cpu.temperature
print("Temperature = %0.1f" % t)
f.write("%0.1f\n" % t)
led.value = False # turn off LED to indicate we're done
# file is saved
time.sleep(1)
|
[
"jahndiego@mailbox.org"
] |
jahndiego@mailbox.org
|
efa317d7041599841fb86a0487ec1c5b60012f8f
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/tests/integration/testdata/buildcmd/PythonPEP600/main.py
|
b636d9d592b30d57cbcf48601a1a0c30754d3393
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 396
|
py
|
import numpy
# from cryptography.fernet import Fernet
def handler(event, context):
# Try using some of the modules to make sure they work & don't crash the process
# print(Fernet.generate_key())
return {"pi": "{0:.2f}".format(numpy.pi)}
def first_function_handler(event, context):
return "Hello World"
def second_function_handler(event, context):
return "Hello Mars"
|
[
"noreply@github.com"
] |
aws.noreply@github.com
|
7375245c12052dfb9d73aec915ab424d5375a4ca
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_appals.py
|
cd6c5bfec1295894c742a3a67db3382d4957e618
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
from xai.brain.wordbase.verbs._appal import _APPAL
# class header
class _APPALS(_APPAL, ):
def __init__(self,):
_APPAL.__init__(self)
self.name = "APPALS"
self.specie = 'verbs'
self.basic = "appal"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
53029b8afdd28c2e40ccc0b7d4e99e6aa2ea3770
|
64dbf9cff4508d5173995e885c151156786329fc
|
/lulz4life.py
|
c028900d8725ea7d9dce1f276d7e7e907f083c39
|
[] |
no_license
|
LulzSecToolkit/LulzPy-Mass-Mailer
|
f1c634a3bd03fbb0a99eb12174c367d092604274
|
7b3a313a336d81074b4cf53f921539b6661473c3
|
refs/heads/master
| 2022-12-02T19:40:43.306176
| 2020-08-25T00:31:16
| 2020-08-25T00:31:16
| 290,068,693
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
import requests
import sys
if len(sys.argv) != 3:
print(f'usage: {sys.argv[0]} <email.txt> <message.html>')
sys.exit()
email=sys.argv[1]
htmlfile=sys.argv[2]
with open(email,"r") as f:
email=f.readlines()
email=[n.rsplit()[0] for n in email]
f.close()
with open(htmlfile,"r") as f:
msg=f.read()
f.close()
data={"sub":"Testing Emial","email":"","from":"no-reply@localhost","message":msg,"submit":"Submit"}
url="https://localhost/Honk.php"
proxies = {
'http': 'socks5://127.0.0.1:9050',
'https': 'socks5://127.0.0.1:9050'
}
for send in email:
data["email"]=send
r=requests.post(url,data=data,proxies=proxies)
if r.status_code == 200:
print(f'{send} Success')
|
[
"noreply@github.com"
] |
LulzSecToolkit.noreply@github.com
|
a6030d9a1e2b395f3ddfb2121f3df47c7aa736b5
|
681cff013b2ad7c654d7744da13d1f3e1320e6ef
|
/payments/__init__.py
|
2b615aeca40e0397cfaa00d5000018dfa1ada61e
|
[] |
no_license
|
MattTheRed/wigglebar
|
56a85961530d5835ffa393197ec08b4a4c58f922
|
ffd9eeeb510971321a0142f56964af05929baf78
|
refs/heads/master
| 2016-09-05T22:08:58.806948
| 2014-12-15T14:32:19
| 2014-12-15T14:32:19
| 28,040,131
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23
|
py
|
__version__ = "2.0b27"
|
[
"pindardev@gmail.com"
] |
pindardev@gmail.com
|
4dbf7645879c6b1cfecabf6e1cb8a484ad15876a
|
5fa8d29f1988e39ca7d0313138dd1832b0966be2
|
/dvc/stage/exceptions.py
|
19d07aea674c4552246d60de3758055394c0440c
|
[
"Apache-2.0"
] |
permissive
|
jackwellsxyz/dvc
|
11aa627af1ecce1a9079c6992e48ff300fa81cfc
|
6a64f861783f3c2eadfc0364725ab06aa3ebb387
|
refs/heads/master
| 2022-07-09T13:16:44.869416
| 2020-05-06T21:25:41
| 2020-05-06T21:25:41
| 261,893,679
| 0
| 0
|
Apache-2.0
| 2020-05-06T22:35:41
| 2020-05-06T22:35:41
| null |
UTF-8
|
Python
| false
| false
| 3,003
|
py
|
from dvc.exceptions import DvcException
class StageCmdFailedError(DvcException):
def __init__(self, stage, status=None):
msg = "failed to run: {}".format(stage.cmd)
if status is not None:
msg += ", exited with {}".format(status)
super().__init__(msg)
class StageFileFormatError(DvcException):
def __init__(self, fname, e):
msg = "DVC-file '{}' format error: {}".format(fname, str(e))
super().__init__(msg)
class StageFileDoesNotExistError(DvcException):
def __init__(self, fname):
from dvc.dvcfile import DVC_FILE_SUFFIX, is_dvc_file
msg = "'{}' does not exist.".format(fname)
sname = fname + DVC_FILE_SUFFIX
if is_dvc_file(sname):
msg += " Do you mean '{}'?".format(sname)
super().__init__(msg)
class StageFileAlreadyExistsError(DvcException):
def __init__(self, relpath):
msg = "not overwriting '{}'".format(relpath)
super().__init__(msg)
class StageFileIsNotDvcFileError(DvcException):
def __init__(self, fname):
from dvc.dvcfile import DVC_FILE_SUFFIX, is_dvc_file
msg = "'{}' is not a DVC-file".format(fname)
sname = fname + DVC_FILE_SUFFIX
if is_dvc_file(sname):
msg += " Do you mean '{}'?".format(sname)
super().__init__(msg)
class StageFileBadNameError(DvcException):
pass
class StagePathOutsideError(DvcException):
pass
class StagePathNotFoundError(DvcException):
pass
class StagePathNotDirectoryError(DvcException):
pass
class StageCommitError(DvcException):
pass
class StageUpdateError(DvcException):
def __init__(self, path):
super().__init__(
"update is not supported for '{}' that is not an "
"import.".format(path)
)
class MissingDataSource(DvcException):
def __init__(self, missing_files):
assert len(missing_files) > 0
source = "source"
if len(missing_files) > 1:
source += "s"
msg = "missing data '{}': {}".format(source, ", ".join(missing_files))
super().__init__(msg)
class StageNotFound(KeyError, DvcException):
def __init__(self, file, name):
super().__init__(
"Stage '{}' not found inside '{}' file".format(name, file.relpath)
)
class StageNameUnspecified(DvcException):
def __init__(self, file):
super().__init__(
"Stage name not provided."
"Please specify the name as: `{0}:stage_name`".format(file.relpath)
)
class DuplicateStageName(DvcException):
def __init__(self, name, file):
super().__init__(
"Stage '{name}' already exists in '{relpath}'.".format(
name=name, relpath=file.relpath
)
)
class InvalidStageName(DvcException):
def __init__(self,):
super().__init__(
"Stage name cannot contain invalid characters: "
"'\\', '/', '@' and ':'."
)
|
[
"noreply@github.com"
] |
jackwellsxyz.noreply@github.com
|
2a24cb3857d77f7ce4f16edcec9b5c31a9d4e49c
|
6fd7413bea392d6d9cd89aa665c8b8fb4a1edea0
|
/Page/Login_Page.py
|
f13861f14ce711e7050d5a16ec7bd2c7e0996f37
|
[] |
no_license
|
cocoL123/app_08test
|
e5fc24caafa44a0273f127d37120a00f79b4522d
|
fd242486d802b41f8d99250598ed10aab8a8d881
|
refs/heads/master
| 2020-03-22T11:09:57.793570
| 2018-07-10T07:31:43
| 2018-07-10T07:31:43
| 139,952,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
# __author: Honorbaby
# date: 2018/7/9
import allure
from Base.Base import Base
import Page
class Login_Page(Base):
def __init__(self,driver):
Base.__init__(self,driver)
@allure.step(title = "点击我的按钮")
def click_my_btn(self):
self.click_element(Page.my_btn)
@allure.step(title = "点击登录注册")
def click_sign_btn(self):
self.click_element(Page.login_btn)
@allure.step(title = "输入账号密码")
def input_mess(self,username,userpwd):
allure.attach("账号:","%s" % (username))
allure.attach("密码:","%s" % (userpwd))
self.input_element(Page.user_name_btn, username)
self.input_element(Page.user_pwd_btn, userpwd)
@allure.step(title = "点击登录按钮")
def click_login_btn(self):
self.click_element(Page.login_btn_id)
@allure.step(title = "判断我的订单按钮是否存在")
def if_my_order_status(self):
allure.attch("存在:","True")
allure.attch("不存在:","False")
try:
self.search_element(Page.my_order_btn)
return True
except Exception as e:
assert False
@allure.step(title = "点击设置按钮")
def click_setting_btn(self):
self.click_element(Page.setting_btn)
@allure.step(title="点击关闭登录输入信息页面按钮")
def click_close(self):
self.click_element(Page.login_close_page_id)
|
[
"1715285528@qq.com"
] |
1715285528@qq.com
|
5763b4238a4b84b082656990dc79969144711f47
|
b7c8760d69977dff3b4a3cb65eceae263d42b8b8
|
/server/scripts/monitor.py
|
a9f21009020a3a8bc4946903218dca1ebe81271d
|
[] |
no_license
|
x100up/analystics
|
086f1578c01255baf7484031d232e87d59fbeeab
|
9f58e4c53653e1fe53abc10861008fe0afef5979
|
refs/heads/master
| 2021-01-15T11:18:49.118104
| 2013-02-14T12:25:36
| 2013-02-14T12:25:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,618
|
py
|
# -*- coding: utf-8 -*-
from scripts.baseScript import BaseAnalyticsScript
from components.listutils import listDiff
from models.Config import Config
import re
from datetime import date
from models.App import App
from services.HiveMetaService import HiveMetaService
CREATE_TABLE_QUERY = """CREATE EXTERNAL TABLE %(table_name)s (params MAP<STRING, STRING>, `userId` INT, `timestamp` TIMESTAMP, hour INT, minute INT, second INT)
PARTITIONED BY (dt STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\;'
MAP KEYS TERMINATED BY '='"""
CREATE_PARTITION_QUERY = """
ALTER TABLE %(table_name)s ADD
PARTITION (dt='%(year)d-%(month)02d-%(day)02d') location '%(path)s'
"""
class MonitorScript(BaseAnalyticsScript):
partNameR = re.compile('^dt=(\d+)-(\d+)-(\d+)$')
def run(self):
command = self.options['command']
self.hiveMetaService = None
self.hiveclient = self.getHiveClient()
if command == 'reload':
day = int(self.options['day'])
month = int(self.options['month'])
year = int(self.options['year'])
appname = self.options['appname']
self.reload(appname, day, month, year)
else:
for appCode in self.getAppCodes():
self.processApp(appCode)
def reload(self, appCode, day, month, year):
dbSession = self.getDBSession()
app = dbSession.query(App).filter(App.code == appCode).first()
if not app:
print 'App {} not present in database. Process app terminated'.format(appCode)
return
print 'ProcessApp {}'.format(appCode)
self.hiveclient.execute('USE stat_{}'.format(appCode))
appConfig = self.getAppConfig(appCode)
hiveMetaService = self.getHiveMetaService()
for eventCode in [appEvent.code for appEvent in appConfig.getEvents()]:
hiveTable = hiveMetaService.getOrCreateHiveTable(app.appId, eventCode)
if not hiveTable:
print 'Cannot get or create HiveTable for {} {}'.format(appCode, eventCode)
continue
self.dropPartition(year, month, day, eventCode)
if self.createPartition(year, month, day, appCode, eventCode):
self.getHiveMetaService().getOrCreateHiveTablePartition(hiveTable.hiveTableId, date(year, month, day))
def processApp(self, appCode):
print 'ProcessApp {}'.format(appCode)
appConfig = self.getAppConfig(appCode)
webHDFSClient = self.getWebHDFSClient()
dbSession = self.getDBSession()
app = dbSession.query(App).filter(App.code == appCode).first()
if not app:
print 'App {} not present in database. Process app terminated'.format(appCode)
return
hiveMetaService = self.getHiveMetaService()
        # get the list of key directories
hdfsEvents = webHDFSClient.getEventCodes(appCode)
realKeys = [appEvent.code for appEvent in appConfig.getEvents()]
        # keys present in the config but with no directories
non_existing_folders = listDiff(realKeys, hdfsEvents)
# check table existing
try:
self.hiveclient.execute('CREATE DATABASE IF NOT EXISTS {}'.format(self.getDBName(appCode)))
except BaseException:
print 'Exception on create database {}'.format(appCode)
return
self.hiveclient.execute('USE {}'.format(self.getDBName(appCode)))
tables = self.hiveclient.execute('SHOW TABLES')
tables = [item[0] for item in tables]
print 'tables for app {}: {} '.format(appCode, len(tables))
for eventCode in realKeys:
print eventCode
if eventCode in non_existing_folders:
continue
table_name = self.getTableName(eventCode)
if not table_name in tables:
print 'table {} not exist'.format(table_name)
# create table
q = CREATE_TABLE_QUERY % {'table_name':table_name}
try:
print q
self.hiveclient.execute(q)
except BaseException:
print 'Exception on create Table {}'.format(table_name)
return
hiveTable = hiveMetaService.getOrCreateHiveTable(app.appId, eventCode)
if not hiveTable:
print 'Cannot get or create HiveTable for {} {}'.format(appCode, eventCode)
continue
            # get the partitions from Hive
partitions = self.hiveclient.execute('SHOW PARTITIONS {}'.format(table_name))
partitions = [item[0] for item in partitions]
print partitions
existingPartitionsDates = []
for partName in partitions:
r = self.partNameR.search(partName).group
existingPartitionsDates.append(date(int(r(1)), int(r(2)), int(r(3))))
            # get the partitions from HDFS
hdfsPartitions = webHDFSClient.getPartitions(appCode, eventCode)
for partitionDate in hdfsPartitions:
                # if the partition date exists on disk but not in Hive
if not partitionDate in existingPartitionsDates:
year, month, day = (partitionDate.year, partitionDate.month, partitionDate.day)
if self.createPartition(year, month, day, appCode, eventCode):
self.getHiveMetaService().getOrCreateHiveTablePartition(hiveTable.hiveTableId, partitionDate)
def createPartition(self, year, month, day, appCode, eventCode):
table_name = self.getTableName(eventCode)
query = CREATE_PARTITION_QUERY % {
'table_name': table_name,
'year': year,
'month': month,
'day': day,
'path': '{}/{}/{}/{}/{}/'.format(self.getTablePath(appCode), eventCode, year, month, day)
}
print 'Create partition {}.{}.{} for {}'.format(year, month, day, table_name)
try:
print query
self.hiveclient.execute(query)
except Exception as ex:
print '- Exception on create partition: {}'.format(ex.message)
return False
else:
print '+ Partition created'
return True
def dropPartition(self, year, month, day, eventCode):
table_name = self.getTableName(eventCode)
query = 'ALTER TABLE %(table_name)s DROP PARTITION (dt=\'%(year)d-%(month)02d-%(day)02d\')' % {
'table_name': table_name, 'day': day, 'month': month, 'year': year}
print 'Drop partition {}-{}-{} for {}'.format(year, month, day, table_name)
try:
print query
self.hiveclient.execute(query)
except Exception as ex:
print '- Exception on drop partition: {}'.format(ex.message)
return False
else:
            print '+ Partition dropped'
return True
def getHiveMetaService(self):
if not self.hiveMetaService:
dbSession = self.getDBSession()
self.hiveMetaService = HiveMetaService(dbSession)
return self.hiveMetaService
def getTableName(self, eventCode):
return self.config.get(Config.HIVE_PREFIX) + eventCode
def getDBName(self, appCode):
return 'stat_' + appCode
def getTablePath(self, appCode):
return self.config.get(Config.HDFS_STAT_ROOT) + appCode + '/'
|
[
"x100up@yandex.ru"
] |
x100up@yandex.ru
|
87a747dc43589b6f9f052e38773a1b58bd9e85a3
|
a0fe8fe36a6133b0e98ff64c220b01485ae4dd49
|
/carts/paginations.py
|
b4c67881539cefd2d1999b74e9f5f4f2775ad967
|
[] |
no_license
|
spolox/rest_week1
|
27e97864f5c7f44506238db74bd646600f5ccc2d
|
0ce93c0e74f2e43935796f12d8ccc8b997d64d6f
|
refs/heads/main
| 2023-03-02T17:51:19.128241
| 2021-02-07T14:50:38
| 2021-02-07T14:50:38
| 329,290,509
| 1
| 0
| null | 2021-02-15T19:14:06
| 2021-01-13T11:45:08
|
Python
|
UTF-8
|
Python
| false
| false
| 180
|
py
|
from rest_framework.pagination import LimitOffsetPagination
class CartItemLimitOffsetPagination(LimitOffsetPagination):
default_limit = 6
max_limit = 6
min_limit = 1
|
[
"draconvs@yandex.ru"
] |
draconvs@yandex.ru
|
55ad69397a06078257e33d2bfcaec2cab7cfc7d5
|
a6a998561cd087774502a3aea68b1440946049f1
|
/SnakeGame.py
|
8621b972001d240673806e5e05a8f9994994b551
|
[] |
no_license
|
TheDarkKnight1939/Snake
|
126e0a7b47cd986493842d23abe8aede6ec0fb25
|
0dece70864beddbb1cd7438a31003cadde6c8687
|
refs/heads/master
| 2021-06-01T15:50:11.820873
| 2016-03-05T10:10:34
| 2016-03-05T10:10:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,462
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 16 16:43:27 2011
@author: Administrator
"""
from Tkinter import *
import random
words_list=open('words.txt','r')
lines=words_list.readline()
lst=lines.split()
#lst=['clear','cat','move','dog','chemistry','flur','right','heal','python','jerk','land','mark','player','pear','glue','project']
y=0
username=[]
usernumber=[]
highscore=[0]
highscore0=[0]
class SnakeGameStudyEnglish:
def __init__(self):
self.memoryscore=[]
self.step=15
# game score
self.gamescore=-10
self.gamelevel=-1
# to initialize the snake in the range of (x1,y1,x2,y1)
r=random.randrange(191,191+15*10,self.step)
self.snakeX=[r,r+self.step,r+self.step*2]
self.snakeY=[r,r,r]
# to initialize the moving direction
self.snakeDirection = 'left'
self.snakeMove = [-1,0]
# to draw the game frame
window = Tk()
window.geometry("600x400+10+10")
window.maxsize(600,400)
window.minsize(600,400)
window.title("Snake game")
self.frame1=Frame(window)
self.frame2=Frame(window)
self.canvas=Canvas(self.frame1,width=590,height=375,bg="yellow")
self.score_label=Label(self.frame2,text="Score:")
self.level_label=Label(self.frame2,text="Level:")
        self.username_label=Label(self.frame2,text="Please Eat Each Letter In the Right Order")
self.frame1.pack()
self.frame2.pack(fill=BOTH)
self.score_label.pack(side=LEFT)
self.level_label.pack(side=RIGHT)
self.username_label.pack()
self.canvas.pack(fill=BOTH)
self.draw_wall()
self.draw_score()
self.draw_food()
self.draw_snake()
self.draw_level()
self.play()
window.mainloop()
"=== View Part ==="
def draw_wall(self):
self.canvas.create_line(10,10,582,10,fill='blue',width=5)
self.canvas.create_line(10,359,582,359,fill='blue',width=5)
self.canvas.create_line(10,10,10,359,fill="blue",width=5)
self.canvas.create_line(582,10,582,359,fill="blue",width=5)
def draw_level(self):
self.level()
self.level_label.config(self.level_label,text="Level: "+str(self.gamelevel))
def draw_score(self):
self.score()
self.score_label.config(self.score_label,text="Score: "+str(self.gamescore))
def draw_food(self):
#lst=['clear','cat','move','dog','chemistry','flur','right','heal','python','jerk','land','mark','plyer','pear','glue','project']
#p=random.randrange(0,15,1)
self.Ew=random.choice(lst)
self.lst1=[]
for x in range(len(self.Ew)):
self.foodx,self.foody=self.random_food()
self.canvas.create_rectangle(self.foodx,self.foody,self.foodx+self.step,self.foody+self.step ,tags="food"+str(x),fill='red')
self.canvas.create_text(self.foodx+7,self.foody+7,text=self.Ew[x],tags='food'+str(x))
self.lst1.append([self.foodx,self.foody,'food'+str(x)])
print self.Ew
def draw_snake(self):
self.canvas.delete("snake")
x,y=self.snake()
for i in range(len(x)):
self.canvas.create_rectangle(x[i],y[i],x[i]+self.step,y[i]+self.step\
, fill='orange',tags='snake')
"=== Model Part ==="
# food model
def random_food(self):
return(random.randrange(11,570,self.step),random.randrange(11,340,self.step))
# snake model
def snake(self):
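        # shift each body segment into its predecessor's cell (tail first),
        # then advance the head one grid step in the current direction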
for i in range(len(self.snakeX)-1,0,-1):
self.snakeX[i] = self.snakeX[i-1]
self.snakeY[i] = self.snakeY[i-1]
self.snakeX[0] += self.snakeMove[0]*self.step
self.snakeY[0] += self.snakeMove[1]*self.step
return(self.snakeX,self.snakeY)
#score model
def score(self):
self.gamescore+=10
def level(self):
self.gamelevel+=1
"=== Control Part ==="
def iseated(self):
#global c
#for c in range(len(self.Ew)):
if self.snakeX[0]==self.lst1[0][0] and self.snakeY[0]==self.lst1[0][1]:
return True
else:
return False
def isdead(self):
if self.snakeX[0]<8 or self.snakeX[0] >580 or\
self.snakeY[0]<8 or self.snakeY[0]>350 :
return True
for i in range(1,len(self.snakeX)):
if self.snakeX[0]==self.snakeX[i] and self.snakeY[0]==self.snakeY[i] :
return True
else:
return False
def delete(self):
self.canvas.delete(self.lst1[0][2])
self.lst1.remove(self.lst1[0])
if len(self.lst1)==0:
self.draw_food()
def move(self,event):
        # left:[-1,0], right:[1,0], up:[0,-1], down:[0,1] (canvas y grows downward)
if (event.keycode == 39 or event.keycode == 68) and self.snakeDirection != 'left':
self.snakeMove = [1,0]
self.snakeDirection = "right"
elif (event.keycode == 38 or event.keycode == 87) and self.snakeDirection != 'down':
self.snakeMove = [0,-1]
self.snakeDirection = "up"
elif (event.keycode == 37 or event.keycode == 65) and self.snakeDirection != 'right':
self.snakeMove = [-1,0]
self.snakeDirection = "left"
elif (event.keycode == 40 or event.keycode == 83) and self.snakeDirection != 'up':
self.snakeMove = [0,1]
self.snakeDirection = "down"
else:
pass
        # the code above can be replaced by the following code
# if (event.keysym == 'Right' or event.keysym == 'd') and self.snakeDirection != 'left':
# self.snakeMove = [1,0]
# self.snakeDirection = "right"
# elif (event.keysym == 'Up' or event.keysym == 'w') and self.snakeDirection != 'down':
# self.snakeMove = [0,-1]
# self.snakeDirection = "up"
# elif (event.keysym == 'Left' or event.keysym == 'a') and self.snakeDirection != 'right':
# self.snakeMove = [-1,0]
# self.snakeDirection = "left"
# elif (event.keysym == 'Down' or event.keysym == 's') and self.snakeDirection != 'up':
# self.snakeMove = [0,1]
# self.snakeDirection = "down"
# else:
# pass
def play(self):
self.canvas.bind("<Key>",self.move)
self.canvas.focus_set()
while True:
if self.isdead():
self.gameover()
break
elif self.iseated():
self.snakeX[0] += self.snakeMove[0]*self.step
self.snakeY[0] += self.snakeMove[1]*self.step
self.snakeX.insert(1,self.lst1[0][0])
self.snakeY.insert(1,self.lst1[0][1])
self.draw_score()
self.delete()
self.draw_snake()
if self.gamescore%30==0:
self.draw_level()
else:
self.draw_snake()
self.canvas.after(200-10*self.gamelevel)
self.canvas.update()
def gameover(self):
highscoreE=0
self.memoryscore.append(self.gamescore)
for element in self.memoryscore:
if element>highscoreE:
highscoreE=element
if self.gamescore==highscoreE:
highscore.remove(highscore[0])
highscore.append(highscoreE)
file_name='highscoreE.txt'
text=open(file_name,'a')
text.write(str(highscoreE)+'、'+str(username[0])+ '、'+str(usernumber[0])+' \n')
text.close()
self.canvas.unbind('<Key>')
self.canvas.bind("<Key>",self.restart)
self.canvas.create_text(270,180,text=" Game Over!\n \
Press any key to continue",font='Helvetica -30 bold',tags='text')
def restart(self,event):
self.canvas.delete("snake","text")
for u in range(len(self.lst1)):
self.canvas.delete(self.lst1[u][2])
self.lst1=[]
self.canvas.unbind('<Key>')
# to initialize the snake in the range of (191,191,341,341)
r=random.randrange(191,191+15*10,self.step)
self.snakeX=[r,r+self.step,r+self.step*2]
self.snakeY=[r,r,r]
# to initialize the moving direction
self.snakeDirection = 'left'
self.snakeMove = [-1,0]
# reset the score to zero
self.gamelevel=-1
self.gamescore=-10
self.draw_score()
self.draw_level()
# to initialize the game (food and snake)
self.draw_food()
self.draw_snake()
# to play the game
self.play()
class SnakeGameClassical:
def __init__(self):
self.memoryscore0=[]
self.step=15
# game score
self.gamescore0=-10
self.gamelevel=1
# to initialize the snake in the range of (x1,y1,x2,y1)
r=random.randrange(191,191+15*10,self.step)
self.snakeX=[r,r+self.step,r+self.step*2]
self.snakeY=[r,r,r]
# to initialize the moving direction
self.snakeDirection = 'left'
self.snakeMove = [-1,0]
# to draw the game frame
window = Tk()
window.geometry("600x400+10+10")
window.maxsize(600,400)
window.minsize(600,400)
window.title("Snake game")
self.frame1=Frame(window)
self.frame2=Frame(window)
self.canvas=Canvas(self.frame1,width=590,height=375,bg="yellow")
self.score_label=Label(self.frame2,text="Score:")
self.level_label=Label(self.frame2,text="Level:")
self.frame1.pack()
self.frame2.pack(fill=BOTH)
self.score_label.pack(side=LEFT)
self.level_label.pack()
self.canvas.pack(fill=BOTH)
self.draw_wall()
self.draw_score()
self.draw_food()
self.draw_snake()
self.draw_level()
self.play()
window.mainloop()
"=== View Part ==="
def draw_wall(self):
self.canvas.create_line(10,10,582,10,fill='blue',width=5)
self.canvas.create_line(10,359,582,359,fill='blue',width=5)
self.canvas.create_line(10,10,10,359,fill="blue",width=5)
self.canvas.create_line(582,10,582,359,fill="blue",width=5)
def draw_level(self):
self.level()
        self.level_label.config(text="Level: "+str(self.gamelevel))
def draw_score(self):
self.score() # score model
        self.score_label.config(text="Score: "+str(self.gamescore0))
def draw_food(self):
self.position=[]
for f in range(self.gamelevel):
self.foodx,self.foody=self.random_food() #food model
self.canvas.create_rectangle(self.foodx,self.foody,self.foodx+self.step,self.foody+self.step,fill='red' ,tags="food"+str(f)) #food view
self.position.append([self.foodx,self.foody,'food'+str(f)])
#print self.position
def draw_snake(self):
self.canvas.delete("snake")
x,y=self.snake() # snake model
for i in range(len(x)): # snake view
self.canvas.create_rectangle(x[i],y[i],x[i]+self.step,y[i]+self.step\
, fill='orange',tags='snake')
"=== Model Part ==="
# food model
def random_food(self):
return(random.randrange(11,570,self.step),random.randrange(11,340,self.step))
# snake model
def snake(self):
for i in range(len(self.snakeX)-1,0,-1):
self.snakeX[i] = self.snakeX[i-1]
self.snakeY[i] = self.snakeY[i-1]
self.snakeX[0] += self.snakeMove[0]*self.step
self.snakeY[0] += self.snakeMove[1]*self.step
return(self.snakeX,self.snakeY)
#score model
def score(self):
self.gamescore0+=10
def level(self):
self.gamelevel+=1
"=== Control Part ==="
    def iseated(self):
        for self.d in range(len(self.position)):
            if self.snakeX[0]==self.position[self.d][0] and self.snakeY[0]==self.position[self.d][1]:
                return True
        # only report "not eaten" after every food position has been checked
        return False
def ruin(self):
self.canvas.delete(self.position[self.d][2])
self.position.remove(self.position[self.d])
#print self.position
if self.position==[]:
self.draw_food()
def isdead(self):
if self.snakeX[0]<8 or self.snakeX[0] >580 or\
self.snakeY[0]<8 or self.snakeY[0]>350 :
return True
        for i in range(1,len(self.snakeX)):
            if self.snakeX[0]==self.snakeX[i] and self.snakeY[0]==self.snakeY[i]:
                return True
        # only report "alive" after every body segment has been checked
        return False
def move(self,event):
        # left:[-1,0], right:[1,0], up:[0,-1], down:[0,1]
if (event.keycode == 39 or event.keycode == 68) and self.snakeDirection != 'left':
self.snakeMove = [1,0]
self.snakeDirection = "right"
elif (event.keycode == 38 or event.keycode == 87) and self.snakeDirection != 'down':
self.snakeMove = [0,-1]
self.snakeDirection = "up"
elif (event.keycode == 37 or event.keycode == 65) and self.snakeDirection != 'right':
self.snakeMove = [-1,0]
self.snakeDirection = "left"
elif (event.keycode == 40 or event.keycode == 83) and self.snakeDirection != 'up':
self.snakeMove = [0,1]
self.snakeDirection = "down"
else:
pass
        # the above keycode checks can be replaced by the following keysym-based code
# if (event.keysym == 'Right' or event.keysym == 'd') and self.snakeDirection != 'left':
# self.snakeMove = [1,0]
# self.snakeDirection = "right"
# elif (event.keysym == 'Up' or event.keysym == 'w') and self.snakeDirection != 'down':
# self.snakeMove = [0,-1]
# self.snakeDirection = "up"
# elif (event.keysym == 'Left' or event.keysym == 'a') and self.snakeDirection != 'right':
# self.snakeMove = [-1,0]
# self.snakeDirection = "left"
# elif (event.keysym == 'Down' or event.keysym == 's') and self.snakeDirection != 'up':
# self.snakeMove = [0,1]
# self.snakeDirection = "down"
# else:
# pass
def play(self):
self.canvas.bind("<Key>",self.move)
self.canvas.focus_set()
while True:
if self.isdead():
self.gameover()
break
elif self.iseated():
self.snakeX[0] += self.snakeMove[0]*self.step
self.snakeY[0] += self.snakeMove[1]*self.step
self.snakeX.insert(1,self.position[self.d][0])
self.snakeY.insert(1,self.position[self.d][1])
self.draw_score()
self.ruin()
self.draw_snake()
if self.gamescore0%50==0:
self.draw_level()
else:
self.draw_snake()
self.canvas.after(200-15*(self.gamelevel))
self.canvas.update()
def gameover(self):
highscoreC=0
self.memoryscore0.append(self.gamescore0)
for el in self.memoryscore0:
if el > highscoreC:
highscoreC=el
if self.gamescore0==highscoreC:
highscore0.remove(highscore0[0])
highscore0.append(highscoreC)
file_name0='highscoreC.txt'
text0=open(file_name0,'a')
text0.write(str(highscoreC)+'、'+str(username[0])+ '、'+str(usernumber[0])+' \n')
text0.close()
self.canvas.unbind('<Key>')
self.canvas.bind("<Key>",self.restart)
self.canvas.create_text(270,180,text=" Game Over!\n \
Press any key to continue",font='Helvetica -30 bold',tags='text')
def restart(self,event):
self.canvas.delete("snake",'text')
for t in range(len(self.position)):
self.canvas.delete(self.position[t][2])
self.canvas.unbind('<Key>')
# to initialize the snake in the range of (191,191,341,341)
r=random.randrange(191,191+15*10,self.step)
self.snakeX=[r,r+self.step,r+self.step*2]
self.snakeY=[r,r,r]
# to initialize the moving direction
self.snakeDirection = 'left'
self.snakeMove = [-1,0]
# reset the score to zero
self.gamelevel=1
self.gamescore0=-10
self.draw_score()
self.draw_level()
# to initialize the game (food and snake)
self.draw_food()
self.draw_snake()
# to play the game
self.play()
class HighScore:
def __init__(self):
window=Tk()
window.title('HighScore')
self.canvas = Canvas(window, width = 400, height = 300,bg='white')
self.canvas.pack()
        self.canvas.create_text(120,30,text="English Mode Highest Score: "+str(highscore[0]))
        self.canvas.create_text(120,100,text="Classical Mode Highest Score: "+str(highscore0[0]))
self.canvas.create_text(120,170,text="Your Name: "+str(username[0]))
self.canvas.create_text(120,240,text="Your Number: "+str(usernumber[0]))
window.mainloop()
class WordsList:
def __init__(self):
window=Tk()
window.title('WordsList')
self.canvas = Canvas(window, width = 300, height = 300,bg='white')
self.canvas.pack()
for self.u in range(len(lst)):
if self.u<=3:
self.canvas.create_text(50,60+40*self.u,text=lst[self.u])
elif 3<self.u<=7:
self.canvas.create_text(150,60+40*(self.u-4),text=lst[self.u])
elif 7<self.u<=11:
self.canvas.create_text(200,60+40*(self.u-8),text=lst[self.u])
elif 11<self.u<=15:
self.canvas.create_text(250,60+40*(self.u-12),text=lst[self.u])
# self.canvas.create_text(50,60,text='clear')
# self.canvas.create_text(50,100,text='cat')
# self.canvas.create_text(50,140,text='move')
# self.canvas.create_text(50,180,text='dog')
# self.canvas.create_text(150,60,text='chemistry')
# self.canvas.create_text(150,100,text='flur')
# self.canvas.create_text(150,140,text='right')
# self.canvas.create_text(150,180,text='heal')
# self.canvas.create_text(200,60,text='python')
# self.canvas.create_text(200,100,text='jerk')
# self.canvas.create_text(200,140,text='land')
# self.canvas.create_text(200,180,text='mark')
# self.canvas.create_text(250,60,text='player')
# self.canvas.create_text(250,100,text='pear')
# self.canvas.create_text(250,140,text='glue')
# self.canvas.create_text(250,180,text='project')
window.mainloop()
class UserName:
def __init__(self):
self.window=Tk()
self.window.title("UserInformation")
self.canvas = Canvas(self.window, width = 300, height = 100,bg='yellow')
frame=Frame(self.window)
self.canvas.pack()
frame.pack()
self.canvas.create_text(150,50,text='Welcome To SnakeGame')
Label(frame,text = "Your Name:").grid(row=1,column=1)
Label(frame,text = 'Your Number:').grid(row=2,column=1)
self.usernameVar=StringVar()
entry1=Entry(frame,textvariable=self.usernameVar).grid(row=1,column=3)
self.usernumberVar=StringVar()
entry2=Entry(frame,textvariable=self.usernumberVar).grid(row=2,column=3)
button=Button(frame,text="StudyEnglish Moddle",command=self.StudyEnglish).grid(row=4,column=3)
button1=Button(frame,text="HighScore",command=self.highscore).grid(row=3,column=1)
button2=Button(frame,text="WordsList",command=self.wordslist).grid(row=3,column=3)
button3=Button(frame,text="Classical Moddle",command=self.Classical).grid(row=4,column=1)
self.window.mainloop()
def StudyEnglish(self):
username.append(self.usernameVar.get())
usernumber.append(self.usernumberVar.get())
#self.window.destroy()
SnakeGameStudyEnglish()
def highscore(self):
username.append(self.usernameVar.get())
usernumber.append(self.usernumberVar.get())
HighScore()
def wordslist(self):
username.append(self.usernameVar.get())
usernumber.append(self.usernumberVar.get())
WordsList()
def Classical(self):
username.append(self.usernameVar.get())
usernumber.append(self.usernumberVar.get())
SnakeGameClassical()
UserName()
|
[
"735422760@qq.com"
] |
735422760@qq.com
|
06027acd18d5ffdf8c7e46375d00f52b1c3c56d6
|
0461798face297be283bcd5c486add9851672ac8
|
/usu04a.py
|
fe44b51f7671c706f59f7a11c37f2876bdcde82c
|
[] |
no_license
|
chasmack/postgis
|
a4503679a5008319bfb4b250f767bab78daae7d6
|
b3e62cbc00321c17143e9cf9a8e76f43cb351355
|
refs/heads/master
| 2016-08-11T17:39:45.271484
| 2016-01-12T22:07:53
| 2016-01-12T22:07:53
| 47,844,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,719
|
py
|
# USU OS Python Assignment 4a
#
# Print the pixel values for all three bands of aster.img at
# the points contained in sites.shp.
import gdal, ogr
from gdalconst import *
import time
import utils
ogr.UseExceptions()
aster_rasterfile = 'data/usu04/aster.img'
sites_shapefile = 'data/usu04/sites.shp'
sites_schema = 'usu'
sites_layername = 'sites'
sites_srid = 32612
DSN = 'PG: host=localhost dbname=postgis_scratch user=postgres password=pg'
# Open a connection to PostgreSQL.
pgDS = ogr.Open(DSN, 1)
# Open the sites shapefile.
shpDriver = ogr.GetDriverByName('ESRI Shapefile')
sitesDS = shpDriver.Open(sites_shapefile, 0)
if sitesDS is None:
    print("Can't open shapefile " + sites_shapefile)
exit(1)
# Create a PostGIS table to hold the sites data.
qstr = """
DROP TABLE IF EXISTS {schema}.{layer};
CREATE TABLE {schema}.{layer} (
gid serial NOT NULL,
site_id integer,
cover character varying(48),
geom geometry({geomtype}, {srid}),
CONSTRAINT {layer}_pkey PRIMARY KEY (gid))
WITH (OIDS=FALSE);
""".format(schema=sites_schema, layer=sites_layername, geomtype='Point', srid=sites_srid)
pgDS.ExecuteSQL(qstr)
# Copy features from the sites shapefile to the PostGIS table.
pgLayer = pgDS.GetLayerByName(sites_schema + '.' + sites_layername)
pgFeatureDefn = pgLayer.GetLayerDefn()
for feat in sitesDS.GetLayer():
pgFeature = ogr.Feature(pgFeatureDefn)
pgFeature.SetField('site_id', feat.GetField('id'))
pgFeature.SetField('cover', feat.GetField('cover'))
pgFeature.SetGeometry(feat.GetGeometryRef())
pgLayer.CreateFeature(pgFeature)
# Done with the shapefile.
shpDriver = sitesDS = None
# Register the raster driver and open the data source.
rastDriver = gdal.GetDriverByName('HFA')
rastDriver.Register()
ds = gdal.Open(aster_rasterfile, GA_ReadOnly)
if ds is None:
    print("Can't open raster data file " + aster_rasterfile)
exit(1)
cols = ds.RasterXSize
rows = ds.RasterYSize
bands = ds.RasterCount
print('\nRaster file: ' + aster_rasterfile)
print('Rows x Columns x Bands: {0:d} x {1:d} x {2:d}'.format(rows, cols, bands))
geotransform = ds.GetGeoTransform()
x0, dx, rx, y0, ry, dy = geotransform
print('\nTop-Left corner (x,y): {0:12.4f}, {1:12.4f}'.format(x0, y0))
print('Pixel resolution (x,y): {0:12.4f}, {1:12.4f}'.format(dx, dy))
print('Axis rotation (x,y): {0:12.4f}, {1:12.4f}'.format(rx, ry))
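# A north-up geotransform maps pixel (col, row) to map coordinates as
# x = x0 + col*dx and y = y0 + row*dy. A minimal sketch of the inverse
# (presumably what utils.get_raster_offset computes -- assumption, the
# real helper may differ):
#
#   def get_raster_offset(coords, gt):
#       x0, dx, rx, y0, ry, dy = gt
#       x, y = coords
#       return int((x - x0) / dx), int((y - y0) / dy)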
# Read point coordinates (x,y) and cover from the sites geometry into a list.
qstr = """
SELECT site_id, cover, geom,
ST_AsBearing(ST_Azimuth(garden_city, geog)) AS bearing,
ST_Distance(garden_city, geog) / 1000 AS dist_km
FROM {schema}.{layer},
LATERAL (
-- using geography to give true bearings
SELECT
ST_PointFromText('POINT(-111.393384 41.946642)', 4326)::Geography AS garden_city,
ST_Transform(geom, 4326)::Geography AS geog
) AS a
ORDER BY dist_km;
""".format(schema=sites_schema, layer=sites_layername)
pgLayer = pgDS.ExecuteSQL(qstr)
sites = []
for feat in pgLayer:
coords = feat.GetGeometryRef().GetPoint_2D()
sites.append({
'site_id' : feat.GetField('site_id'),
'coords' : coords,
'cover' : feat.GetField('cover'),
'dist_km' : feat.GetField('dist_km'),
'bearing' : feat.GetField('bearing'),
'offset' : utils.get_raster_offset(coords, geotransform),
'data' : [],
})
pgDS.ReleaseResultSet(pgLayer)
startTime = time.time()
READ_ENTIRE_BAND = False
if READ_ENTIRE_BAND:
print('\nReading entire band ...')
else:
print('\nReading one pixel at a time ...')
for n in (1,2,3):
band = ds.GetRasterBand(n)
if (READ_ENTIRE_BAND):
# Read entire band into an array.
data = band.ReadAsArray(0, 0, cols, rows)
for site in sites:
col, row = site['offset']
site['data'].append(data[row, col])
else:
# Read one pixel at a time.
for site in sites:
xoff, yoff = site['offset']
data = band.ReadAsArray(xoff, yoff, 1, 1)
site['data'].append(data[0, 0])
band = data = None
print('\n<id>: <northing>, <easting>, <bearing> <distance>: <cover> = (<b1>, <b2>, <b3>).')
print('\nCoordinates are UTM 12N Northing, Easting (meters).')
print('Bearing and Distance are straight line from Garden City, UT.')
print()
for site in sites:
    x, y = site['coords']
fmt = '{0:2d}: {1:.4f}, {2:.4f}, {3:s} {4:5.2f} km: {5:>6s} = ({6:2d}, {7:2d}, {8:2d})'
print(fmt.format(
site['site_id'], y, x, site['bearing'], site['dist_km'],
site['cover'], *site['data']
))
endTime = time.time()
print('\ntime: {0:.3f} sec'.format(endTime - startTime))
# Done.
ds = None
exit(0)
|
[
"chas.mack@asis.com"
] |
chas.mack@asis.com
|
8a3169a1f97b905640130d9aea6dea851330c763
|
fc0e0379889bda5f3b7b7759983b1183ba73f7c9
|
/dnppy/download/download_filelist.py
|
f46b61e7e43aa23ee380a17d506a968e3275b8ea
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ritviksahajpal/dnppy
|
3a52d47f9926b1aff55da46cc45dde09a403ad02
|
2acf9c565ea2f5c79b5cd3d776a35aedd2d31e87
|
refs/heads/master
| 2021-01-15T21:45:00.924505
| 2015-09-30T14:59:29
| 2015-09-30T14:59:29
| 45,763,646
| 1
| 0
| null | 2015-11-08T02:21:51
| 2015-11-08T02:21:51
| null |
UTF-8
|
Python
| false
| false
| 2,465
|
py
|
__author__ = 'jwely'
from dnppy import core
from download_urls import download_urls
import os, time
__all__ = ["download_filelist"]
def download_filelist(ftp_texts, file_type = None, outdir = None):
"""
Reads text file of download links, downloads them.
This script reads a text file with urls such as those output from ECHO REVERB
and outputs them to an output directory. It will retry failed links 20 times before
giving up and outputting a warning to the user.
:param ftp_texts: array of txt files ordered from reverb containing ftp links
:param file_type: file extension of the desired files, leave blank or False to grab all types.
:param outdir: folder where files are to be placed after download
:return list failed: list of files which failed to download after the end of the script.
"""
failed = []
# force inputs to take list format
ftp_texts = core.enf_list(ftp_texts)
if file_type is not None:
file_type = core.enf_list(file_type)
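    # enf_list presumably wraps a bare string into a list so both a single
    # path and a list of paths are accepted -- a minimal sketch (assumption;
    # see dnppy.core for the real implementation):
    #   def enf_list(item):
    #       return item if isinstance(item, list) else [item]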
for ftptext in ftp_texts:
#verify that things exist.
core.exists(ftptext)
if not outdir:
outdir,_ = os.path.split(ftptext)
ftp = open(ftptext,'r')
sites = ftp.readlines()
print("Attempting to download {0} files!".format(len(sites)))
print("Saving all files to {0}".format(outdir))
# perform the first attempt
failed = download_urls(sites, outdir, file_type)
        # retry up to 19 more times (20 attempts in total, as documented above)
        for i in range(1, 20):
            if len(failed) > 0:
                print("retry number {0} to grab {1} failed downloads!".format(i, len(failed)))
                time.sleep(60)
                # keep the argument order consistent with the first attempt above
                failed = download_urls(failed, outdir, file_type)
# once all tries are complete, print a list of files which repeatedly failed
if len(failed)>0:
print('Files at the following URLs have failed 20 download attempts')
print('Manually verify that these files exist on the server:')
for i in failed:
print(i)
else:
print('Finished with no errors!')
# close the open text files and finish up
ftp.close()
return failed
# testing area
if __name__ == "__main__":
download_filelist("reverb_filelist.txt",
outdir = r"C:\Users\jwely\Desktop\troubleshooting\rawMODIS")
|
[
"jeff.ely.08@gmail.com"
] |
jeff.ely.08@gmail.com
|
a799df0fcaf22dcd868fec9bb264a4992a0d23cf
|
3bd6c53bff12f7450eafb9750aef8d9755c1643d
|
/Flask-blog/app/venv/Lib/site-packages/flask/cli.py
|
0c7adbbb1342fbb31b22612942783c9793172264
|
[] |
no_license
|
harsht24/Blog-flask-web-app
|
10872475d166cc6c11a733b6c7eb49eefe7f68a0
|
9605ec67ab20005408786d3da5c5ff5ea6321560
|
refs/heads/master
| 2022-11-29T16:55:21.282679
| 2019-12-08T09:51:18
| 2019-12-08T09:51:18
| 214,374,818
| 0
| 1
| null | 2022-11-18T23:15:11
| 2019-10-11T07:43:28
|
Python
|
UTF-8
|
Python
| false
| false
| 30,939
|
py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
from __future__ import print_function
import ast
import inspect
import os
import platform
import re
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock
from threading import Thread
import click
from werkzeug.utils import import_string
from ._compat import getargspec
from ._compat import itervalues
from ._compat import reraise
from ._compat import text_type
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_env
from .helpers import get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
try:
import ssl
except ImportError:
ssl = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in itervalues(module.__dict__) if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
'Detected multiple Flask applications in module "{module}". Use '
'"FLASK_APP={module}:name" to specify the correct '
"one.".format(module=module.__name__)
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
'Detected factory "{factory}" in module "{module}", but '
"could not call it without arguments. Use "
"\"FLASK_APP='{module}:{factory}(args)'\" to specify "
"arguments.".format(factory=attr_name, module=module.__name__)
)
raise NoAppException(
'Failed to find Flask application or factory in module "{module}". '
'Use "FLASK_APP={module}:name to specify one.'.format(module=module.__name__)
)
def call_factory(script_info, app_factory, arguments=()):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
args_spec = getargspec(app_factory)
arg_names = args_spec.args
arg_defaults = args_spec.defaults
if "script_info" in arg_names:
return app_factory(*arguments, script_info=script_info)
elif arguments:
return app_factory(*arguments)
elif not arguments and len(arg_names) == 1 and arg_defaults is None:
return app_factory(script_info)
return app_factory()
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
# explicitly delete tb as it is circular referenced
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(script_info, module, app_name):
"""Checks if the given string is a variable name or a function. If it is a
function, it checks for specified arguments and whether it takes a
``script_info`` argument and calls the function with the appropriate
arguments.
"""
from . import Flask
match = re.match(r"^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$", app_name)
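    # e.g. "app", "create_app", "create_app()" and "create_app('dev', 5)" all
    # match; group 1 is the object name, group 2 the raw argument string.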
if not match:
raise NoAppException(
'"{name}" is not a valid variable name or function '
"expression.".format(name=app_name)
)
name, args = match.groups()
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(e.args[0])
if inspect.isfunction(attr):
if args:
try:
args = ast.literal_eval("({args},)".format(args=args))
except (ValueError, SyntaxError) as e:
raise NoAppException(
"Could not parse the arguments in "
'"{app_name}".'.format(e=e, app_name=app_name)
)
else:
args = ()
try:
app = call_factory(script_info, attr, args)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
'{e}\nThe factory "{app_name}" in module "{module}" could not '
"be called with the specified arguments.".format(
e=e, app_name=app_name, module=module.__name__
)
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from "
'"{module}:{app_name}".'.format(module=module.__name__, app_name=app_name)
)
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True # noqa: F841
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise NoAppException(
'While importing "{name}", an ImportError was raised:'
"\n\n{tb}".format(name=module_name, tb=traceback.format_exc())
)
elif raise_if_not_found:
raise NoAppException('Could not import "{name}".'.format(name=module_name))
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
import werkzeug
from . import __version__
message = "Python %(python)s\nFlask %(flask)s\nWerkzeug %(werkzeug)s"
click.echo(
message
% {
"python": platform.python_version(),
"flask": __version__,
"werkzeug": werkzeug.__version__,
},
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the flask version",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True # noqa: F841
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True # noqa: F841
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True # noqa: F841
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True # noqa: F841
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo(object):
"""Helper object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True # noqa: F841
if self._loaded_app is not None:
return self._loaded_app
app = None
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ("wsgi.py", "run.py"):
import_name = prepare_import(path)
app = locate_app(self, import_name, None, raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
"Could not locate a Flask application. You did not provide "
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"run.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
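# Illustrative usage (hypothetical command; commands registered on
# ``app.cli`` are wrapped in with_appcontext automatically):
#
#   @click.command()
#   @with_appcontext
#   def seed():
#       ...  # runs inside the application context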
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault("cls", AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
    For information on why this is useful see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
    :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
        from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self,
add_default_commands=True,
create_app=None,
add_version_option=True,
load_dotenv=True,
set_debug_flag=True,
**extra
):
params = list(extra.pop("params", None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points("flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get("obj")
if obj is None:
obj = ScriptInfo(
create_app=self.create_app, set_debug_flag=self.set_debug_flag
)
kwargs["obj"] = obj
kwargs.setdefault("auto_envvar_prefix", "FLASK")
return super(FlaskGroup, self).main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
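# e.g. _path_is_ancestor("/a", "/a/b") is True, while
# _path_is_ancestor("/a", "/ab") is False (the "b" survives the lstrip,
# so the rejoined path "/a/b" no longer equals the original "/ab").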
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
    If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
Changes the current working directory to the location of the first file
found, with the assumption that it is in the top level project directory
and will be where the Python path should import local packages from.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile(".venv") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .venv or .flaskenv files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
err=True,
)
return False
# if the given path specifies the actual file then return True,
# else False
if path is not None:
if os.path.isfile(path):
return dotenv.load_dotenv(path)
return False
new_dir = None
for name in (".venv", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path)
if new_dir and os.getcwd() != new_dir:
os.chdir(new_dir)
return new_dir is not None # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
return
if app_import_path is not None:
message = ' * Serving Flask app "{0}"'.format(app_import_path)
if not eager_loading:
message += " (lazy loading)"
click.echo(message)
click.echo(" * Environment: {0}".format(env))
if env == "production":
click.secho(
" WARNING: This is a development server. "
"Do not use it in a production deployment.",
fg="red",
)
click.secho(" Use a production WSGI server instead.", dim=True)
if debug is not None:
click.echo(" * Debug mode: {0}".format("on" if debug else "off"))
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self):
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
if ssl is None:
raise click.BadParameter(
'Using "--cert" requires Python to be compiled with SSL support.',
ctx,
param,
)
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import OpenSSL # noqa: F401
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires pyOpenSSL.", ctx, param
)
return value
obj = import_string(value, silent=True)
if sys.version_info < (2, 7, 9):
if obj:
return obj
else:
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
if sys.version_info < (2, 7, 9):
is_context = cert and not isinstance(cert, (text_type, bytes))
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.', ctx, param
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
class SeparatedPathType(click.Path):
"""Click option type that accepts a list of values separated by the
OS's path separator (``:``, ``;`` on Windows). Each value is
validated as a :class:`click.Path` type.
"""
def convert(self, value, param, ctx):
items = self.split_envvar_value(value)
super_convert = super(SeparatedPathType, self).convert
return [super_convert(item, param, ctx) for item in items]
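# e.g. on POSIX "--extra-files a.cfg:b.cfg" converts to ["a.cfg", "b.cfg"],
# with each item validated as a click.Path.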
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--eager-loading/--lazy-loader",
default=None,
help="Enable or disable eager loading. By default eager "
"loading is enabled if the reloader is disabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@click.option(
"--extra-files",
default=None,
type=SeparatedPathType(),
help=(
"Extra files that trigger a reload on change. Multiple paths"
" are separated by '{}'.".format(os.path.pathsep)
),
)
@pass_script_info
def run_command(
info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
if eager_loading is None:
eager_loading = not reload
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
extra_files=extra_files,
)
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command():
"""Run an interactive Python shell in the context of a given
Flask application. The application will populate the default
    namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from .globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = "Python %s on %s\nApp: %s [%s]\nInstance: %s" % (
sys.version,
sys.platform,
app.import_name,
app.env,
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup, "r") as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "rule", "match")),
default="endpoint",
help=(
'Method to sort routes by. "match" is the order that Flask will match '
"routes when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))
if sort in ("endpoint", "rule"):
rules = sorted(rules, key=attrgetter(sort))
elif sort == "methods":
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [", ".join(sorted(rule.methods - ignored_methods)) for rule in rules]
headers = ("Endpoint", "Methods", "Rule")
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*("-" * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main(as_module=False):
cli.main(prog_name="python -m flask" if as_module else None)
if __name__ == "__main__":
main(as_module=True)
|
[
"htamkiya24@gmail.com"
] |
htamkiya24@gmail.com
|
ca0abc7ff8fd53a24c8109ee36bf48ce9d1726e7
|
543d43411cdb6623f8dc7a1c9578d68113497791
|
/fplo/fplo.py
|
f47340c06727d9c31759a28d4120978a851e0341
|
[] |
no_license
|
DMFTSever/wannlib
|
4a46ce23df89b9d175b13b24155ebe2d40acdc07
|
459fdb7816ccffecfeb0a4371b981debaca97c29
|
refs/heads/master
| 2021-07-06T02:13:14.641259
| 2021-04-19T10:04:49
| 2021-04-19T10:04:49
| 232,587,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,704
|
py
|
######### NEEDS TO BE TESTED #########
######### NOT WORKING #########
import sys
import os
sys.path.insert(0,sys.argv[0].replace('/wannlib/fplo/fplo.py',''))
import numpy as np
#from . import fplo_core as fplo
from wannlib.fplo import fplo_core as fplo
import wannlib.core.wannlib_core as core
def make_wannier90_hk(fnameHk, meshDims, fnameHr):
'''
    Routine to read an FPLO H(r) and Fourier transform it onto
    a Monkhorst-Pack k-mesh with user-specified dimensions.
'''
rbasis, hr = fplo.read_fplo(fname=fnameHr)
mesh = core.generate_direct_coord_monkhorst_pack_kmesh(npoints=meshDims).reshape(meshDims.prod(),3)
hk = fplo.fourier_transform_hr(hr=hr,kpoints=mesh)
core.write_wannier90_hk(fname=fnameHk, hk=hk, kpoints=mesh)
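# Illustrative call (hypothetical file names; an 8x8x8 Monkhorst-Pack mesh):
#   make_wannier90_hk("wannier90_hk.dat", np.array([8, 8, 8]), "hamdata")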
def generate_bandstructure_from_fplo(steps, points, fnameHr):
#TODO GO ON HERE fix it!!!
'''
    Generate a band structure from FPLO output.
'''
rbasis, hr = fplo.read_fplo(fname=fnameHr)
print(rbasis.shape, hr.shape)
print(rbasis)
kbasis = 2.*np.pi*np.linalg.inv(rbasis)
kpath, cartkpath = core.generate_k_path(steps=steps, points=points, kbasis=kbasis)
diffs = np.linalg.norm(cartkpath[1:]-cartkpath[0:-1],axis=1)
dist = np.array([np.sum(diffs[:i]) for i in range(0,len(cartkpath))])
hk = fplo.fourier_transform_hr(hr=hr, kpoints=cartkpath)
print(cartkpath.shape, hk.shape)
ee, ev = np.linalg.eigh(hk)
    # NOTE: the eigenvalues are already wrong at this point (see TODO above)
print(ee.shape)
np.savetxt('test.dat', ee)
return dist, ee, ev
def make_bandstructure_from_fplo(fnameOut, steps, points, fnameHr):
distance, eigenenergies, eigenvectors = generate_bandstructure_from_fplo(steps=steps, points=points, fnameHr=fnameHr)
nintervals=len(points)-1
norb = eigenenergies.shape[-1]
nenergyvec = np.repeat(np.arange(1,norb+1),steps*nintervals)
eigenenergies = eigenenergies.flatten(order='F')
eigenvectors = eigenvectors.transpose(1,0,2).reshape((eigenvectors.shape[0]*eigenvectors.shape[1],eigenvectors.shape[2]))
projections = np.real(eigenvectors*eigenvectors.conjugate())
distance = np.repeat(distance[:,None], repeats=norb, axis=1).flatten(order='F')
bands = np.concatenate((nenergyvec[:,None], distance[:,None], eigenenergies[:,None], projections), axis=1)
formatspec = ['%6d','%12.8f','%12.8f']
for i in range(norb):
formatspec += ['%12.8f']
with open(fnameOut,'w') as outstream:
outstream.write("#PATH\n#")
for point in points[:-1]:
outstream.write(" {} -".format(point))
outstream.write(" {}".format(points[-1]))
outstream.write("\n\n")
outstream.write('{index:^6} {kdist:^12} {energy:^12}'.format(index='#index' ,kdist='kdist', energy='energy'))
for i in range(norb):
outstream.write(' {orb:^12}'.format(orb='orb'+str((i+1))))
outstream.write('\n')
for i in range(0,norb):
for j in range(0,nintervals):
np.savetxt(fname=outstream, X=bands[(i*nintervals+j)*steps:(i*nintervals+j+1)*steps], fmt=formatspec)
outstream.write('\n')
outstream.write('\n')
if __name__ == '__main__':
import sys
import os
sys.path.insert(0,sys.argv[0].replace('/wannlib/fplo/fplo.py',''))
from wannlib.core import wannlib
from wannlib.core import wannlib_core as core
make_bandstructure_from_fplo('bands.dat', 50, np.array([[0,0,0],[0.5,0.5,0]]),'hamdata')
|
[
"severino-adler@gmx.at"
] |
severino-adler@gmx.at
|
71064b46b0fae8627c888907c272fc24bd26f089
|
49edfa75ca899a2ff8c77420b81f6991e325c264
|
/manage.py
|
fafb9cc1d8aecfdaf4e9bc9d798720150e79b8ac
|
[
"MIT"
] |
permissive
|
renelhs/cellfixstore
|
5dd39321c443cd925cca5ffa9b1448d6e7aea4fb
|
040f66be7bdb506661ba9d4cf6125d5eaf56711c
|
refs/heads/master
| 2023-04-29T18:50:48.405418
| 2022-05-17T23:11:48
| 2022-05-17T23:11:48
| 195,576,978
| 7
| 1
|
MIT
| 2023-04-21T20:53:36
| 2019-07-06T19:41:43
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CellFixStore.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"reneluishs@gmail.com"
] |
reneluishs@gmail.com
|
e56b7daab727de9003a359bfb002d938fe3de0a0
|
2ed86a79d0fcd299ad4a01310954c5eddcf01edf
|
/homeassistant/components/unifiprotect/models.py
|
40280c028679c32d62305d9bc8484ac72a4f0966
|
[
"Apache-2.0"
] |
permissive
|
konnected-io/home-assistant
|
037f12c87bb79e19220192eb918e49db1b1a8b3e
|
2e65b77b2b5c17919939481f327963abdfdc53f0
|
refs/heads/dev
| 2023-05-11T08:57:41.891518
| 2023-05-07T20:03:37
| 2023-05-07T20:03:37
| 109,931,626
| 24
| 10
|
Apache-2.0
| 2023-02-22T06:24:01
| 2017-11-08T05:27:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,794
|
py
|
"""The unifiprotect integration models."""
from __future__ import annotations
from collections.abc import Callable, Coroutine
from dataclasses import dataclass
from enum import Enum
import logging
from typing import Any, Generic, TypeVar, cast
from pyunifiprotect.data import NVR, Event, ProtectAdoptableDeviceModel
from homeassistant.helpers.entity import EntityDescription
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
T = TypeVar("T", bound=ProtectAdoptableDeviceModel | NVR)
class PermRequired(int, Enum):
"""Type of permission level required for entity."""
NO_WRITE = 1
WRITE = 2
DELETE = 3
@dataclass
class ProtectRequiredKeysMixin(EntityDescription, Generic[T]):
"""Mixin for required keys."""
ufp_required_field: str | None = None
ufp_value: str | None = None
ufp_value_fn: Callable[[T], Any] | None = None
ufp_enabled: str | None = None
ufp_perm: PermRequired | None = None
def get_ufp_value(self, obj: T) -> Any:
"""Return value from UniFi Protect device."""
if self.ufp_value is not None:
return get_nested_attr(obj, self.ufp_value)
if self.ufp_value_fn is not None:
return self.ufp_value_fn(obj)
# reminder for future that one is required
raise RuntimeError( # pragma: no cover
"`ufp_value` or `ufp_value_fn` is required"
)
def get_ufp_enabled(self, obj: T) -> bool:
"""Return value from UniFi Protect device."""
if self.ufp_enabled is not None:
return bool(get_nested_attr(obj, self.ufp_enabled))
return True
def has_required(self, obj: T) -> bool:
"""Return if has required field."""
if self.ufp_required_field is None:
return True
return bool(get_nested_attr(obj, self.ufp_required_field))
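# Illustrative only (hypothetical field names): a description such as
#   ProtectRequiredKeysMixin(key="dark", name="Is Dark", ufp_value="is_dark")
# resolves camera.is_dark via get_nested_attr when get_ufp_value(camera) runs.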
@dataclass
class ProtectEventMixin(ProtectRequiredKeysMixin[T]):
"""Mixin for events."""
ufp_event_obj: str | None = None
ufp_smart_type: str | None = None
def get_event_obj(self, obj: T) -> Event | None:
"""Return value from UniFi Protect device."""
if self.ufp_event_obj is not None:
return cast(Event, get_nested_attr(obj, self.ufp_event_obj))
return None
def get_is_on(self, obj: T) -> bool:
"""Return value if event is active."""
value = bool(self.get_ufp_value(obj))
if value:
event = self.get_event_obj(obj)
value = event is not None
if not value:
_LOGGER.debug("%s (%s): missing event", self.name, obj.mac)
if event is not None and self.ufp_smart_type is not None:
value = self.ufp_smart_type in event.smart_detect_types
if not value:
_LOGGER.debug(
"%s (%s): %s not in %s",
self.name,
obj.mac,
self.ufp_smart_type,
event.smart_detect_types,
)
if value:
_LOGGER.debug("%s (%s): value is on", self.name, obj.mac)
return value
@dataclass
class ProtectSetableKeysMixin(ProtectRequiredKeysMixin[T]):
"""Mixin for settable values."""
ufp_set_method: str | None = None
ufp_set_method_fn: Callable[[T, Any], Coroutine[Any, Any, None]] | None = None
async def ufp_set(self, obj: T, value: Any) -> None:
"""Set value for UniFi Protect device."""
_LOGGER.debug("Setting %s to %s for %s", self.name, value, obj.display_name)
if self.ufp_set_method is not None:
await getattr(obj, self.ufp_set_method)(value)
elif self.ufp_set_method_fn is not None:
await self.ufp_set_method_fn(obj, value)
|
[
"noreply@github.com"
] |
konnected-io.noreply@github.com
|
02363f4518e3339d42cf6906fe43a45d3a73b017
|
79b186222cc8ee6442522f4c5c3ebba4b835f4b4
|
/seqlib/gff/add_gene_id.py
|
6df07977943317c5adb67080ced856beabc0bd56
|
[] |
no_license
|
sudmantlab/seqlib
|
2b669df20a862d84e348d6fea7a128ef64729d64
|
071a72f27cd0c160618404850f23b8666a8d74e2
|
refs/heads/master
| 2022-06-27T01:02:36.381101
| 2022-06-16T19:14:15
| 2022-06-16T19:14:15
| 148,357,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
import sys
if __name__=="__main__":
for l in sys.stdin:
if l[0] == "#":
continue
l=l.rstrip()
sl = l.split()
if "geneID" in sl[-1]:
d={e.split("=")[0]:e.split("=")[1] for e in sl[-1].split(";")}
d["gene_id"] = d['geneID']
sl[-1] = ";".join(["%s=%s"%(k,v) for k,v in d.iteritems()])
print("\t".join(sl))
|
[
"psudmant@login001.cm.cluster"
] |
psudmant@login001.cm.cluster
|
b55ff943b7348ac97fe1fa40cdb31aa99fe65765
|
878eb4b539d77051dd7330389b90d988d9aef8f3
|
/CAPITULO 8/Exercicio D.py
|
84d36064619169f2f794772564090700dfaff17a
|
[
"MIT"
] |
permissive
|
LarmIg/Algoritmos-Python
|
a031badc9f607cbbc109ee4ca8bfe60d5636d867
|
f2c9889705cacac007833f6ab9a413b06213f882
|
refs/heads/master
| 2022-11-25T05:36:26.313469
| 2020-07-25T11:59:36
| 2020-07-25T11:59:36
| 282,436,201
| 0
| 0
|
MIT
| 2020-07-25T12:08:13
| 2020-07-25T12:08:12
| null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
# Write a program that reads a one-dimensional matrix A with ten integer elements. Build a two-dimensional matrix C with three columns: the first column of C holds the elements of A plus 5, the second column holds the factorial of each corresponding element of A, and the third column holds the square of each corresponding element of A. Display matrix C.
A = []
C= [[], [], []]
for i in range(10):
    A.append(int(input('Enter a value for matrix A: ')))
for i in range(1):
for j in range(10):
C[i].append(int(A[j] + 5))
for i in range(1, 2):
for j in range(10):
n = A[j]
result = 1
for x in range(1, n + 1):
result *= x
C[i].append(result)
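        # (equivalent, if the standard library is allowed: result = math.factorial(n), with `import math`)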
for i in range(2, 3):
for j in range(10):
C[i].append(int(A[j] ** 2))
print('='*40)
for i in range(len(C)):
    for j in range(len(C[i])):  # print all ten columns, not just seven
print('C[{}][{}] = {}'.format(i, j, C[i][j]))
|
[
"noreply@github.com"
] |
LarmIg.noreply@github.com
|
9077b9c2ed66691d447341572e390acefbb7102b
|
d0e083ce351b9680f1fe46a5216f605aefc47018
|
/Everything/Daily_Backup/multiply_fits.py
|
dd5b84f66ef69ef9b9d51722a972782bf9fbb9a4
|
[] |
no_license
|
bhishanpdl/Backups
|
ebfad64cc4f0a0999064f5f4dccf2320c7db8329
|
e041a6c3f4a15c9e1f3eee88b6ea745fd0ea9add
|
refs/heads/master
| 2020-04-27T09:18:32.444634
| 2019-03-06T19:51:29
| 2019-03-06T19:51:29
| 174,209,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-#
#
# Author : Bhishan Poudel; Physics PhD Student, Ohio University
# Date : May 10, 2017 Wed
# Last update : Jun 19, 2017 Mon
# Est time : 0.001 sec
#
# Imports
from astropy.io import fits
def multiply_fits(fitsfile,value):
# Get data
data = fits.getdata(fitsfile) * value
# Write output file
print('Overwriting: ', fitsfile)
fits.writeto(fitsfile,data,clobber=True)
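    # NOTE: `clobber` is deprecated in newer astropy releases; there,
    # fits.writeto(fitsfile, data, overwrite=True) is the replacement.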
def main():
fitsfile = 'trial1_HST.fits'
value = 1000
multiply_fits(fitsfile,value)
if __name__ == '__main__':
main()
|
[
"bhishantryphysics@gmail.com"
] |
bhishantryphysics@gmail.com
|
4a2953c41e514a7923931593ca37e3dd158a7b2a
|
5277d3db7b60898536f50ff431de3caeb8e58862
|
/src/sensor.py
|
a312f9070cba4aee46cb5bf023e34e711e913cba
|
[
"MIT"
] |
permissive
|
alexaldr/python-unit-tests
|
7696fef5e2ad68a740914b1a215bb42e8295ee0a
|
94512762e96de570eec0f980cb1d94899e580df3
|
refs/heads/master
| 2023-04-13T20:02:54.230865
| 2020-05-01T02:36:07
| 2020-05-01T02:36:07
| 259,465,259
| 0
| 0
|
MIT
| 2021-04-20T19:56:10
| 2020-04-27T21:53:33
|
Python
|
UTF-8
|
Python
| false
| false
| 62
|
py
|
class Sensor:
def sample_pressure(self):
pass
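# Test doubles typically subclass this stub and override sample_pressure(),
# e.g. (illustrative):
#   class FakeSensor(Sensor):
#       def sample_pressure(self):
#           return 18.0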
|
[
"alex.aldr@gmail.com"
] |
alex.aldr@gmail.com
|
fa825e94d5fc57e894508ca22017e6457327263a
|
55a273347cb103fe2b2704cb9653956956d0dd34
|
/code/tmp_rtrip/sqlite3/test/types.py
|
d816fa20af66bdd8e104eabc6a7b8851ebb72291
|
[
"MIT"
] |
permissive
|
emilyemorehouse/ast-and-me
|
4af1bc74fc967ea69ac1aed92664f6428acabe6a
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
refs/heads/master
| 2022-11-18T03:50:36.505882
| 2018-05-12T17:53:44
| 2018-05-12T17:53:44
| 115,035,148
| 25
| 1
|
MIT
| 2022-11-04T11:36:43
| 2017-12-21T18:27:19
|
Python
|
UTF-8
|
Python
| false
| false
| 13,998
|
py
|
import datetime
import unittest
import sqlite3 as sqlite
try:
import zlib
except ImportError:
zlib = None
class SqliteTypeTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(':memory:')
self.cur = self.con.cursor()
self.cur.execute(
'create table test(i integer, s varchar, f number, b blob)')
def tearDown(self):
self.cur.close()
self.con.close()
def CheckString(self):
self.cur.execute('insert into test(s) values (?)', ('Österreich',))
self.cur.execute('select s from test')
row = self.cur.fetchone()
self.assertEqual(row[0], 'Österreich')
def CheckSmallInt(self):
self.cur.execute('insert into test(i) values (?)', (42,))
self.cur.execute('select i from test')
row = self.cur.fetchone()
self.assertEqual(row[0], 42)
def CheckLargeInt(self):
num = 2 ** 40
self.cur.execute('insert into test(i) values (?)', (num,))
self.cur.execute('select i from test')
row = self.cur.fetchone()
self.assertEqual(row[0], num)
def CheckFloat(self):
val = 3.14
self.cur.execute('insert into test(f) values (?)', (val,))
self.cur.execute('select f from test')
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def CheckBlob(self):
sample = b'Guglhupf'
val = memoryview(sample)
self.cur.execute('insert into test(b) values (?)', (val,))
self.cur.execute('select b from test')
row = self.cur.fetchone()
self.assertEqual(row[0], sample)
def CheckUnicodeExecute(self):
self.cur.execute("select 'Österreich'")
row = self.cur.fetchone()
self.assertEqual(row[0], 'Österreich')
class DeclTypesTests(unittest.TestCase):
class Foo:
def __init__(self, _val):
if isinstance(_val, bytes):
_val = _val.decode('utf-8')
self.val = _val
def __eq__(self, other):
if not isinstance(other, DeclTypesTests.Foo):
return NotImplemented
return self.val == other.val
def __conform__(self, protocol):
if protocol is sqlite.PrepareProtocol:
return self.val
else:
return None
def __str__(self):
return '<%s>' % self.val
def setUp(self):
self.con = sqlite.connect(':memory:', detect_types=sqlite.
PARSE_DECLTYPES)
self.cur = self.con.cursor()
self.cur.execute(
'create table test(i int, s str, f float, b bool, u unicode, foo foo, bin blob, n1 number, n2 number(5))'
)
sqlite.converters['FLOAT'] = lambda x: 47.2
sqlite.converters['BOOL'] = lambda x: bool(int(x))
sqlite.converters['FOO'] = DeclTypesTests.Foo
sqlite.converters['WRONG'] = lambda x: 'WRONG'
sqlite.converters['NUMBER'] = float
def tearDown(self):
del sqlite.converters['FLOAT']
del sqlite.converters['BOOL']
del sqlite.converters['FOO']
del sqlite.converters['NUMBER']
self.cur.close()
self.con.close()
def CheckString(self):
self.cur.execute('insert into test(s) values (?)', ('foo',))
self.cur.execute('select s as "s [WRONG]" from test')
row = self.cur.fetchone()
self.assertEqual(row[0], 'foo')
def CheckSmallInt(self):
self.cur.execute('insert into test(i) values (?)', (42,))
self.cur.execute('select i from test')
row = self.cur.fetchone()
self.assertEqual(row[0], 42)
def CheckLargeInt(self):
num = 2 ** 40
self.cur.execute('insert into test(i) values (?)', (num,))
self.cur.execute('select i from test')
row = self.cur.fetchone()
self.assertEqual(row[0], num)
def CheckFloat(self):
val = 3.14
self.cur.execute('insert into test(f) values (?)', (val,))
self.cur.execute('select f from test')
row = self.cur.fetchone()
self.assertEqual(row[0], 47.2)
def CheckBool(self):
self.cur.execute('insert into test(b) values (?)', (False,))
self.cur.execute('select b from test')
row = self.cur.fetchone()
self.assertEqual(row[0], False)
self.cur.execute('delete from test')
self.cur.execute('insert into test(b) values (?)', (True,))
self.cur.execute('select b from test')
row = self.cur.fetchone()
self.assertEqual(row[0], True)
def CheckUnicode(self):
val = 'Österreich'
self.cur.execute('insert into test(u) values (?)', (val,))
self.cur.execute('select u from test')
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def CheckFoo(self):
val = DeclTypesTests.Foo('bla')
self.cur.execute('insert into test(foo) values (?)', (val,))
self.cur.execute('select foo from test')
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def CheckUnsupportedSeq(self):
class Bar:
pass
val = Bar()
with self.assertRaises(sqlite.InterfaceError):
self.cur.execute('insert into test(f) values (?)', (val,))
def CheckUnsupportedDict(self):
class Bar:
pass
val = Bar()
with self.assertRaises(sqlite.InterfaceError):
self.cur.execute('insert into test(f) values (:val)', {'val': val})
def CheckBlob(self):
sample = b'Guglhupf'
val = memoryview(sample)
self.cur.execute('insert into test(bin) values (?)', (val,))
self.cur.execute('select bin from test')
row = self.cur.fetchone()
self.assertEqual(row[0], sample)
def CheckNumber1(self):
self.cur.execute('insert into test(n1) values (5)')
value = self.cur.execute('select n1 from test').fetchone()[0]
self.assertEqual(type(value), float)
def CheckNumber2(self):
"""Checks whether converter names are cut off at '(' characters"""
self.cur.execute('insert into test(n2) values (5)')
value = self.cur.execute('select n2 from test').fetchone()[0]
self.assertEqual(type(value), float)
class ColNamesTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(':memory:', detect_types=sqlite.
PARSE_COLNAMES)
self.cur = self.con.cursor()
self.cur.execute('create table test(x foo)')
sqlite.converters['FOO'] = lambda x: '[%s]' % x.decode('ascii')
sqlite.converters['BAR'] = lambda x: '<%s>' % x.decode('ascii')
sqlite.converters['EXC'] = lambda x: 5 / 0
sqlite.converters['B1B1'] = lambda x: 'MARKER'
def tearDown(self):
del sqlite.converters['FOO']
del sqlite.converters['BAR']
del sqlite.converters['EXC']
del sqlite.converters['B1B1']
self.cur.close()
self.con.close()
def CheckDeclTypeNotUsed(self):
"""
Assures that the declared type is not used when PARSE_DECLTYPES
is not set.
"""
self.cur.execute('insert into test(x) values (?)', ('xxx',))
self.cur.execute('select x from test')
val = self.cur.fetchone()[0]
self.assertEqual(val, 'xxx')
def CheckNone(self):
self.cur.execute('insert into test(x) values (?)', (None,))
self.cur.execute('select x from test')
val = self.cur.fetchone()[0]
self.assertEqual(val, None)
def CheckColName(self):
self.cur.execute('insert into test(x) values (?)', ('xxx',))
self.cur.execute('select x as "x [bar]" from test')
val = self.cur.fetchone()[0]
self.assertEqual(val, '<xxx>')
self.assertEqual(self.cur.description[0][0], 'x')
def CheckCaseInConverterName(self):
self.cur.execute('select \'other\' as "x [b1b1]"')
val = self.cur.fetchone()[0]
self.assertEqual(val, 'MARKER')
def CheckCursorDescriptionNoRow(self):
"""
cursor.description should at least provide the column name(s), even if
no row returned.
"""
self.cur.execute('select * from test where 0 = 1')
self.assertEqual(self.cur.description[0][0], 'x')
def CheckCursorDescriptionInsert(self):
self.cur.execute('insert into test values (1)')
self.assertIsNone(self.cur.description)
@unittest.skipIf(sqlite.sqlite_version_info < (3, 8, 3), 'CTEs not supported')
class CommonTableExpressionTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(':memory:')
self.cur = self.con.cursor()
self.cur.execute('create table test(x foo)')
def tearDown(self):
self.cur.close()
self.con.close()
def CheckCursorDescriptionCTESimple(self):
self.cur.execute('with one as (select 1) select * from one')
self.assertIsNotNone(self.cur.description)
self.assertEqual(self.cur.description[0][0], '1')
def CheckCursorDescriptionCTESMultipleColumns(self):
self.cur.execute('insert into test values(1)')
self.cur.execute('insert into test values(2)')
self.cur.execute(
'with testCTE as (select * from test) select * from testCTE')
self.assertIsNotNone(self.cur.description)
self.assertEqual(self.cur.description[0][0], 'x')
def CheckCursorDescriptionCTE(self):
self.cur.execute('insert into test values (1)')
self.cur.execute(
'with bar as (select * from test) select * from test where x = 1')
self.assertIsNotNone(self.cur.description)
self.assertEqual(self.cur.description[0][0], 'x')
self.cur.execute(
'with bar as (select * from test) select * from test where x = 2')
self.assertIsNotNone(self.cur.description)
self.assertEqual(self.cur.description[0][0], 'x')
class ObjectAdaptationTests(unittest.TestCase):
def cast(obj):
return float(obj)
cast = staticmethod(cast)
def setUp(self):
self.con = sqlite.connect(':memory:')
try:
del sqlite.adapters[int]
        except KeyError:
pass
sqlite.register_adapter(int, ObjectAdaptationTests.cast)
self.cur = self.con.cursor()
def tearDown(self):
del sqlite.adapters[int, sqlite.PrepareProtocol]
self.cur.close()
self.con.close()
def CheckCasterIsUsed(self):
self.cur.execute('select ?', (4,))
val = self.cur.fetchone()[0]
self.assertEqual(type(val), float)
@unittest.skipUnless(zlib, 'requires zlib')
class BinaryConverterTests(unittest.TestCase):
def convert(s):
return zlib.decompress(s)
convert = staticmethod(convert)
def setUp(self):
self.con = sqlite.connect(':memory:', detect_types=sqlite.
PARSE_COLNAMES)
sqlite.register_converter('bin', BinaryConverterTests.convert)
def tearDown(self):
self.con.close()
def CheckBinaryInputForConverter(self):
testdata = b'abcdefg' * 10
result = self.con.execute('select ? as "x [bin]"', (memoryview(zlib
.compress(testdata)),)).fetchone()[0]
self.assertEqual(testdata, result)
class DateTimeTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(':memory:', detect_types=sqlite.
PARSE_DECLTYPES)
self.cur = self.con.cursor()
self.cur.execute('create table test(d date, ts timestamp)')
def tearDown(self):
self.cur.close()
self.con.close()
def CheckSqliteDate(self):
d = sqlite.Date(2004, 2, 14)
self.cur.execute('insert into test(d) values (?)', (d,))
self.cur.execute('select d from test')
d2 = self.cur.fetchone()[0]
self.assertEqual(d, d2)
def CheckSqliteTimestamp(self):
ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0)
self.cur.execute('insert into test(ts) values (?)', (ts,))
self.cur.execute('select ts from test')
ts2 = self.cur.fetchone()[0]
self.assertEqual(ts, ts2)
@unittest.skipIf(sqlite.sqlite_version_info < (3, 1),
'the date functions are available on 3.1 or later')
def CheckSqlTimestamp(self):
now = datetime.datetime.utcnow()
self.cur.execute('insert into test(ts) values (current_timestamp)')
self.cur.execute('select ts from test')
ts = self.cur.fetchone()[0]
self.assertEqual(type(ts), datetime.datetime)
self.assertEqual(ts.year, now.year)
def CheckDateTimeSubSeconds(self):
ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0, 500000)
self.cur.execute('insert into test(ts) values (?)', (ts,))
self.cur.execute('select ts from test')
ts2 = self.cur.fetchone()[0]
self.assertEqual(ts, ts2)
def CheckDateTimeSubSecondsFloatingPoint(self):
ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0, 510241)
self.cur.execute('insert into test(ts) values (?)', (ts,))
self.cur.execute('select ts from test')
ts2 = self.cur.fetchone()[0]
self.assertEqual(ts, ts2)
def suite():
sqlite_type_suite = unittest.makeSuite(SqliteTypeTests, 'Check')
decltypes_type_suite = unittest.makeSuite(DeclTypesTests, 'Check')
colnames_type_suite = unittest.makeSuite(ColNamesTests, 'Check')
adaptation_suite = unittest.makeSuite(ObjectAdaptationTests, 'Check')
bin_suite = unittest.makeSuite(BinaryConverterTests, 'Check')
date_suite = unittest.makeSuite(DateTimeTests, 'Check')
cte_suite = unittest.makeSuite(CommonTableExpressionTests, 'Check')
return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite,
colnames_type_suite, adaptation_suite, bin_suite, date_suite,
cte_suite))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == '__main__':
test()
|
[
"emily@cuttlesoft.com"
] |
emily@cuttlesoft.com
|
f0017a2619e7ddc6b04be80e56d4b0a95489a0a6
|
cc6147d1989a28a9a7f867156288a2cad89eb353
|
/models/student.py
|
4a6d21743dba6c196164d3a0e9be49cf11cdea48
|
[] |
no_license
|
Wetschi/fastapi-mongodb-crudapp
|
68df90638cecd0f73a1b5cfb9670c5c78db22334
|
aca65167e69b2d159f9edfceee36083e5108f428
|
refs/heads/master
| 2023-08-17T00:21:34.194275
| 2021-09-15T08:39:57
| 2021-09-15T08:39:57
| 406,265,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
#import statement
from pydantic import BaseModel
class Student(BaseModel):
student_name: str
student_email: str
student_phone: str
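# Hypothetical usage sketch (field values are illustrative, not from the repo):
# Student(student_name="Jane Doe", student_email="jane@example.com", student_phone="555-0100")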
|
[
"kujatw@gmail.com"
] |
kujatw@gmail.com
|
9ec352b1ae6bcb07940bd57c69fb4570274a0d4e
|
3b5e09b7ebba5ba915db9e70fdbf1e82e2bf101e
|
/aws/autoscale.py
|
dc9d0f5f4a667f841b8fad8da693bb1db7ccd4c0
|
[] |
no_license
|
bennettrogers/utilities
|
bbc2f9d60bcd3c38b2cbc4fded185a54c76c2100
|
e08f2277bc12061b530ce2769c16991c1c6f74ed
|
refs/heads/master
| 2021-01-10T20:35:14.522707
| 2013-05-06T21:48:04
| 2013-05-06T21:48:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,298
|
py
|
import boto
from boto.ec2.autoscale import LaunchConfiguration, AutoScalingGroup, Tag
as_conn = boto.connect_autoscale()
elb_conn = boto.connect_elb()
AUTOSCALE_CONFIGS = [
#{
# 'tier':'',
# 'lifecycle':'',
# 'instance_size':'',
# 'image_id':'',
# 'capacity':0,
# 'key_pair':'',
# 'availability_zones':[],
# 'security_groups':[],
# 'tags': {
# 'Name':'',
# 'tier':'',
# 'role':'',
# },
#},
]
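# Note: besides the keys sketched in the template above, the loop below also
# reads config['price'] when lifecycle is 'spot', so spot configs need that key too.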
for config in AUTOSCALE_CONFIGS:
resource_name = '{0}_{1}'.format(config['tier'], config['lifecycle'],)
lc_args = {
'name':resource_name,
'image_id':config['image_id'],
'key_name':config['key_pair'],
'security_groups':config['security_groups'],
'instance_type':config['instance_size'],
        'spot_price': config['price'] if config['lifecycle'] == 'spot' else None,
}
# delete the launch configuration if it already exists (TODO: figure out if we can update existing configs)
if as_conn.get_all_launch_configurations(names=[resource_name]):
as_conn.delete_launch_configuration(launch_config_name=resource_name)
lc = LaunchConfiguration(**lc_args)
as_conn.create_launch_configuration(lc)
lc_obj = as_conn.get_all_launch_configurations(names=[resource_name])[0]
tags = config['tags']
as_tags = [Tag(key=key, value=value, propagate_at_launch=True, resource_id=resource_name) for key, value in tags.items()]
asg_args = {
'group_name':resource_name,
'availability_zones':config['availability_zones'],
'launch_config':lc_obj,
'min_size':config['capacity'],
'max_size':config['capacity'],
'tags':as_tags,
}
# delete the asg if it already exists (TODO: figure out if we can update existing asgs)
if as_conn.get_all_groups(names=[resource_name]):
as_conn.delete_auto_scaling_group(name=resource_name)
asg = AutoScalingGroup(**asg_args)
as_conn.create_auto_scaling_group(asg)
#==================================================
#as_conn.delete_launch_configuration(launch_config_name=resource_name)
|
[
"bennett@crocodoc.com"
] |
bennett@crocodoc.com
|
d14446b745d548751ba83f0874137e9af92e0d48
|
751fe2de18f00596e4f1ed342b56bd6f38ee2053
|
/wisdem/orbit/phases/install/turbine_install/standard.py
|
0fdeed3eccec4f809faae851c7edf31404598ceb
|
[
"Apache-2.0"
] |
permissive
|
WISDEM/WISDEM
|
42fa780915d62fd4e4203050e886093ecc806c8a
|
d7270ebe1c554293a9d36730d67ab555c071cb17
|
refs/heads/master
| 2023-08-04T01:22:43.215105
| 2023-06-22T23:36:07
| 2023-06-22T23:36:07
| 23,678,280
| 120
| 86
|
Apache-2.0
| 2023-06-22T19:26:34
| 2014-09-04T20:30:24
|
Python
|
UTF-8
|
Python
| false
| false
| 13,676
|
py
|
"""`TurbineInstallation` class and related processes."""
__author__ = "Jake Nunemaker"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "Jake Nunemaker"
__email__ = "jake.nunemaker@nrel.gov"
from copy import deepcopy
from math import ceil
import numpy as np
import simpy
from marmot import process
from wisdem.orbit.core import Vessel
from wisdem.orbit.core.logic import (
jackdown_if_required,
shuttle_items_to_queue,
prep_for_site_operations,
get_list_of_items_from_port,
)
from wisdem.orbit.phases.install import InstallPhase
from wisdem.orbit.core.exceptions import ItemNotFound
from .common import Blade, Nacelle, TowerSection, install_nacelle, install_tower_section, install_turbine_blade
class TurbineInstallation(InstallPhase):
"""
Standard turbine installation module using a Wind Turbine Installation
Vessel (WTIV). If input `feeder` and `num_feeders` are not supplied, the
WTIV will perform all transport and installation tasks. If the above inputs
are defined, feeder barges will transport turbine components from port to
site.
"""
phase = "Turbine Installation"
capex_category = "Turbine"
#:
expected_config = {
"wtiv": "dict | str",
"feeder": "dict | str (optional)",
"num_feeders": "int (optional)",
"site": {"depth": "m", "distance": "km"},
"plant": {"num_turbines": "int"},
"port": {
"num_cranes": "int (optional, default: 1)",
"monthly_rate": "USD/mo (optional)",
"name": "str (optional)",
},
"turbine": {
"hub_height": "m",
"tower": {
"deck_space": "m2",
"mass": "t",
"length": "m",
"sections": "int (optional)",
},
"nacelle": {"deck_space": "m2", "mass": "t"},
"blade": {"deck_space": "m2", "mass": "t"},
},
}
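    # Illustrative config shape only -- every value below is hypothetical:
    # {
    #     "wtiv": "example_wtiv",
    #     "site": {"depth": 20, "distance": 40},
    #     "plant": {"num_turbines": 10},
    #     "turbine": {
    #         "hub_height": 100,
    #         "tower": {"deck_space": 100, "mass": 400, "length": 100},
    #         "nacelle": {"deck_space": 200, "mass": 500},
    #         "blade": {"deck_space": 100, "mass": 50},
    #     },
    # }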
def __init__(self, config, weather=None, **kwargs):
"""
Creates an instance of TurbineInstallation.
Parameters
----------
config : dict
Simulation specific configuration.
weather : np.ndarray
Weather profile at site.
"""
super().__init__(weather, **kwargs)
config = self.initialize_library(config, **kwargs)
self.config = self.validate_config(config)
self.initialize_port()
self.initialize_wtiv()
self.initialize_turbines()
self.setup_simulation(**kwargs)
@property
def system_capex(self):
"""Returns 0 as turbine capex is handled at in ProjectManager."""
return 0
def setup_simulation(self, **kwargs):
"""
Sets up simulation infrastructure, routing to specific methods dependent
on number of feeders.
"""
if self.config.get("num_feeders", None):
self.initialize_feeders()
self.initialize_queue()
self.setup_simulation_with_feeders(**kwargs)
else:
self.feeders = None
self.setup_simulation_without_feeders(**kwargs)
def setup_simulation_without_feeders(self, **kwargs):
"""
Sets up infrastructure for turbine installation without feeder barges.
"""
site_distance = self.config["site"]["distance"]
site_depth = self.config["site"]["depth"]
hub_height = self.config["turbine"]["hub_height"]
solo_install_turbines(
self.wtiv,
port=self.port,
distance=site_distance,
turbines=self.num_turbines,
tower_sections=self.num_sections,
num_blades=3,
site_depth=site_depth,
hub_height=hub_height,
**kwargs,
)
def setup_simulation_with_feeders(self, **kwargs):
"""
Sets up infrastructure for turbine installation using feeder barges.
"""
site_distance = self.config["site"]["distance"]
site_depth = self.config["site"]["depth"]
hub_height = self.config["turbine"]["hub_height"]
install_turbine_components_from_queue(
self.wtiv,
queue=self.active_feeder,
distance=site_distance,
turbines=self.num_turbines,
tower_sections=self.num_sections,
num_blades=3,
site_depth=site_depth,
hub_height=hub_height,
**kwargs,
)
for feeder in self.feeders:
shuttle_items_to_queue(
feeder,
port=self.port,
queue=self.active_feeder,
distance=site_distance,
items=self.component_list,
**kwargs,
)
def initialize_wtiv(self):
"""
Initializes the WTIV simulation object and the onboard vessel storage.
"""
wtiv_specs = self.config.get("wtiv", None)
name = wtiv_specs.get("name", "WTIV")
wtiv = self.initialize_vessel(name, wtiv_specs)
self.env.register(wtiv)
wtiv.initialize()
wtiv.at_port = True
wtiv.at_site = False
self.wtiv = wtiv
def initialize_feeders(self):
"""
Initializes feeder barge objects.
"""
number = self.config.get("num_feeders", None)
feeder_specs = self.config.get("feeder", None)
self.feeders = []
for n in range(number):
# TODO: Add in option for named feeders.
name = "Feeder {}".format(n)
feeder = self.initialize_vessel(name, feeder_specs)
self.env.register(feeder)
feeder.initialize()
feeder.at_port = True
feeder.at_site = False
self.feeders.append(feeder)
def initialize_turbines(self):
"""
Initializes turbine components at port.
"""
tower = deepcopy(self.config["turbine"]["tower"])
self.num_sections = tower.get("sections", 1)
_section = {}
for k in ["length", "deck_space", "mass"]:
try:
_section[k] = ceil(tower.get(k) / self.num_sections)
except TypeError:
pass
section = TowerSection(**_section)
nacelle = Nacelle(**self.config["turbine"]["nacelle"])
blade = Blade(**self.config["turbine"]["blade"])
component_list = [
*np.repeat(section, self.num_sections),
nacelle,
# TODO: Add in configuration for number of blades.
*np.repeat(blade, 3),
]
self.num_turbines = self.config["plant"]["num_turbines"]
for _ in range(self.num_turbines):
for item in component_list:
self.port.put(item)
self.component_list = [a.type for a in component_list]
def initialize_queue(self):
"""
Initializes the queue, modeled as a ``SimPy.Resource`` that feeders
join at site. This limits the simulation to one active feeder at a time.
"""
self.active_feeder = simpy.Resource(self.env, capacity=1)
self.active_feeder.vessel = None
self.active_feeder.activate = self.env.event()
@property
def detailed_output(self):
"""Returns detailed outputs of the turbine installation."""
if self.feeders:
transport_vessels = [*self.feeders]
else:
transport_vessels = [self.wtiv]
outputs = {
self.phase: {
**self.agent_efficiencies,
**self.get_max_cargo_mass_utilzations(transport_vessels),
**self.get_max_deck_space_utilzations(transport_vessels),
}
}
return outputs
@process
def solo_install_turbines(vessel, port, distance, turbines, tower_sections, num_blades, **kwargs):
"""
Logic that a Wind Turbine Installation Vessel (WTIV) uses during a single
turbine installation process.
Parameters
----------
vessel : vessels.Vessel
Vessel object that represents the WTIV.
distance : int | float
Distance between port and site (km).
    turbines : int
        Total number of turbines to install.
    tower_sections : int
        Number of tower sections per turbine.
    num_blades : int
        Number of blades per turbine.
"""
reequip_time = vessel.crane.reequip(**kwargs)
component_list = [
*np.repeat("TowerSection", tower_sections),
"Nacelle",
*np.repeat("Blade", num_blades),
]
n = 0
while n < turbines:
if vessel.at_port:
try:
# Get turbine components
yield get_list_of_items_from_port(vessel, port, component_list, **kwargs)
except ItemNotFound:
# If no items are at port and vessel.storage.items is empty,
# the job is done
if not vessel.storage.items:
vessel.submit_debug_log(message="Item not found. Shutting down.")
break
# Transit to site
vessel.update_trip_data()
vessel.at_port = False
yield vessel.transit(distance)
vessel.at_site = True
if vessel.at_site:
if vessel.storage.items:
yield prep_for_site_operations(vessel, **kwargs)
for i in range(tower_sections):
# Get tower section
section = yield vessel.get_item_from_storage("TowerSection", **kwargs)
# Install tower section
height = section.length * (i + 1)
yield install_tower_section(vessel, section, height, **kwargs)
# Get turbine nacelle
nacelle = yield vessel.get_item_from_storage("Nacelle", **kwargs)
# Install nacelle
yield vessel.task_wrapper("Reequip", reequip_time, constraints=vessel.transit_limits)
yield install_nacelle(vessel, nacelle, **kwargs)
# Install turbine blades
yield vessel.task_wrapper("Reequip", reequip_time, constraints=vessel.transit_limits)
for _ in range(num_blades):
blade = yield vessel.get_item_from_storage("Blade", **kwargs)
yield install_turbine_blade(vessel, blade, **kwargs)
yield jackdown_if_required(vessel, **kwargs)
vessel.submit_debug_log(progress="Turbine")
n += 1
else:
# Transit to port
vessel.at_site = False
yield vessel.transit(distance)
vessel.at_port = True
vessel.submit_debug_log(message="Turbine installation complete!")
@process
def install_turbine_components_from_queue(wtiv, queue, distance, turbines, tower_sections, num_blades, **kwargs):
"""
Logic that a Wind Turbine Installation Vessel (WTIV) uses to install
    turbine components from a queue of feeder barges.
Parameters
----------
    wtiv : vessels.Vessel
        Vessel object that represents the WTIV.
    queue : simpy.Resource
        Queue object to interact with active feeder barge.
    turbines : int
        Total number of turbines to install.
    tower_sections : int
        Number of tower sections per turbine.
    num_blades : int
        Number of blades per turbine.
distance : int | float
Distance from site to port (km).
"""
reequip_time = wtiv.crane.reequip(**kwargs)
n = 0
while n < turbines:
if wtiv.at_port:
# Transit to site
wtiv.at_port = False
yield wtiv.transit(distance)
wtiv.at_site = True
if wtiv.at_site:
if queue.vessel:
# Prep for turbine install
yield prep_for_site_operations(wtiv, **kwargs)
for i in range(tower_sections):
# Get tower section
section = yield wtiv.get_item_from_storage("TowerSection", vessel=queue.vessel, **kwargs)
# Install tower section
height = section.length * (i + 1)
yield install_tower_section(wtiv, section, height, **kwargs)
# Get turbine nacelle
nacelle = yield wtiv.get_item_from_storage("Nacelle", vessel=queue.vessel, **kwargs)
# Install nacelle
yield wtiv.task_wrapper("Reequip", reequip_time, constraints=wtiv.transit_limits)
yield install_nacelle(wtiv, nacelle, **kwargs)
# Install turbine blades
yield wtiv.task_wrapper("Reequip", reequip_time, constraints=wtiv.transit_limits)
for i in range(num_blades):
release = True if i + 1 == num_blades else False
blade = yield wtiv.get_item_from_storage("Blade", vessel=queue.vessel, release=release, **kwargs)
yield install_turbine_blade(wtiv, blade, **kwargs)
yield jackdown_if_required(wtiv, **kwargs)
wtiv.submit_debug_log(progress="Turbine")
n += 1
else:
start = wtiv.env.now
yield queue.activate
delay_time = wtiv.env.now - start
wtiv.submit_action_log("Delay", delay_time, location="Site")
# Transit to port
wtiv.at_site = False
yield wtiv.transit(distance)
wtiv.at_port = True
wtiv.submit_debug_log(message="Turbine installation complete!")
|
[
"garrett.barter@nrel.gov"
] |
garrett.barter@nrel.gov
|
9b97bf6648e2e2876b8dc321b2f93a5dfe46ff6d
|
8ae9bdbb56622e7eb2fe7cf700b8fe4b7bd6e7ae
|
/llvm-3.8.0-r267675/tools/lldb/packages/Python/lldbsuite/test/lang/objc/objc-static-method-stripped/TestObjCStaticMethodStripped.py
|
7d88292a051c3c607fa10a165ca504bdf7621cd3
|
[
"NCSA"
] |
permissive
|
mapu/toolchains
|
f61aa8b64d1dce5e618f0ff919d91dd5b664e901
|
3a6fea03c6a7738091e980b9cdee0447eb08bb1d
|
refs/heads/master
| 2021-09-16T00:07:16.731713
| 2017-12-29T04:09:01
| 2017-12-29T04:09:01
| 104,563,481
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,704
|
py
|
"""Test calling functions in static methods with a stripped binary."""
from __future__ import print_function
import os, time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestObjCStaticMethodStripped(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers to break inside main().
self.main_source = "static.m"
self.break_line = line_number(self.main_source, '// Set breakpoint here.')
@skipUnlessDarwin
@add_test_categories(['pyapi'])
@skipIf(debug_info=no_match("dsym"), bugnumber="This test requires a stripped binary and a dSYM")
#<rdar://problem/12042992>
def test_with_python_api(self):
"""Test calling functions in static methods with a stripped binary."""
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
self.build()
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
bpt = target.BreakpointCreateByLocation(self.main_source, self.break_line)
self.assertTrue(bpt, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple (None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
thread_list = lldbutil.get_threads_stopped_at_breakpoint (process, bpt)
# Make sure we stopped at the first breakpoint.
self.assertTrue (len(thread_list) != 0, "No thread stopped at our breakpoint.")
self.assertTrue (len(thread_list) == 1, "More than one thread stopped at our breakpoint.")
# Now make sure we can call a function in the static method we've stopped in.
frame = thread_list[0].GetFrameAtIndex(0)
self.assertTrue (frame, "Got a valid frame 0 frame.")
cmd_value = frame.EvaluateExpression ("(char *) sel_getName (_cmd)")
self.assertTrue (cmd_value.IsValid())
sel_name = cmd_value.GetSummary()
self.assertTrue (sel_name == "\"doSomethingWithString:\"", "Got the right value for the selector as string.")
cmd_value = frame.EvaluateExpression ("[Foo doSomethingElseWithString:string]")
self.assertTrue (cmd_value.IsValid())
string_length = cmd_value.GetValueAsUnsigned()
self.assertTrue (string_length == 27, "Got the right value from another class method on the same class.")
|
[
"wangl@cb94f8c2-beb9-42d2-aaaf-6dc30ea5c36a"
] |
wangl@cb94f8c2-beb9-42d2-aaaf-6dc30ea5c36a
|
8924ae91a905f5467e3fff018de639b0e95d9a0d
|
9a7a6190a24b7a570ccdc1b598b1c64428c11c1b
|
/randomDictionary.py
|
d6f8cc0baf15657afc356133f8ebd756dca0f95e
|
[] |
no_license
|
kadeemp/Tweet-Generator
|
0254ee07ee1f63684f23b47f9569d40210807613
|
d0db07e3f710d27fb29930c4093e762a5b3f36ef
|
refs/heads/master
| 2021-01-11T15:55:55.248665
| 2017-02-22T09:22:00
| 2017-02-22T09:22:00
| 82,723,228
| 0
| 0
| null | 2017-02-21T20:20:51
| 2017-02-21T20:20:51
| null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
import sys
import random
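# Usage: python3 randomDictionary.py N
# Prints N words chosen at random from /usr/share/dict/words, ending with a period.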
x = int(sys.argv[1])                  # number of random words to print
resultString = ""
f = open("/usr/share/dict/words", "r")
content = f.readlines()
f.close()
content = [i.rstrip("\n") for i in content]
while x != 0:
    resultString += random.choice(content)
    resultString += " "
    x -= 1
    if x == 0:
        resultString += "."
print(resultString)
|
[
"kadeempalacios@gmail.com"
] |
kadeempalacios@gmail.com
|
b2a2c92eb15a5e754235765bfab30e037954b066
|
abd80c57a2ce54fa7b14827d30b339956c4391ba
|
/lesson2/conftest.py
|
04862734029e24e11c59472e1ee11d9d174923b0
|
[] |
no_license
|
ksanayarysh/otus-qa-course
|
8245c1369a6d9b26c81f24a9297ac6e600749480
|
245fd72fa82808b78d52f22e5edc48e5f1cc4bba
|
refs/heads/master
| 2020-05-01T05:21:25.147169
| 2019-07-15T10:10:55
| 2019-07-15T10:10:55
| 177,298,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
"""Conftest"""
import pytest
def pytest_addoption(parser):
"""Опция командной строки"""
parser.addoption("--site", action="store", default="None")
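    # Invoked as e.g. `pytest --site <marker>`; the value is compared against
    # each collected test's markers below, and non-matching tests are skipped.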
def pytest_collection_modifyitems(config, items):
"""Получаем и в зависимости от ее значения скипим тесты"""
value = config.getoption("--site")
skip = pytest.mark.skip(reason="no need to run")
for item in items:
if value not in item.keywords:
item.add_marker(skip)
|
[
"ksana32@gmail.com"
] |
ksana32@gmail.com
|
743c943cfb2adf558e8b63f6b4820d215d0b3eb1
|
4db2a13f9f0a85e9fd38cf245462ec91f2c6ce67
|
/proj_11/n2tGrader.py
|
40c62d524b350537a3aab34970ccda139d9b1696
|
[] |
no_license
|
alexgh123/nand_2_tetris
|
c6f5bde84bf064152e2a3abcbf96d86a707b89da
|
1611b8e592082fc12d7d36e23ddb75c2c753b2df
|
refs/heads/master
| 2021-05-12T01:40:08.535774
| 2018-01-15T16:09:16
| 2018-01-15T16:09:16
| 117,565,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,005
|
py
|
#
#
#N2T grading server
# projectnum in executable portion at bottom is the directory to be graded
#
#
# on *nix flavors launch from the command line as
# python3 n2tGrader.py --XX
#
# where --XX is the project number being run
#
#
# on Windows launch from the command line as
# n2tGrader.py --XX --w
#
# where --XX is the project number being run
# and the --w is telling the script to use the windows .bat launch style
#
#
import os
import shutil
from pathlib import Path
import subprocess
NIX = 0
WIN = 1
PLATFORM = NIX
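# Default platform; the --w command-line flag (parsed in __main__ below) switches this to WIN.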
def testResults_01_02_05(codeDirectory, whichProject):
testList = tests[whichProject]
for test in testList:
print(codeDirectory + '/' + test + 'tst')
#launch the HardwareSimulator with test as the arg
testCommand = '../../nand2tetris/tools/HardwareSimulator.sh ' + codeDirectory + '/' + test + 'tst'
try:
subprocess.run(testCommand, stdin=None, stdout=None, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(codeDirectory, ':', cpe)
def testResults_03(codeDirectory):
testDict = tests['tests03']
for sublist in ['a', 'b']:
testList = testDict[sublist]
for test in testList:
#launch the HardwareSimulator with test as the arg
print(test)
testCommand = '../../nand2tetris/tools/HardwareSimulator.sh ' + codeDirectory + '/' + sublist[0] + '/' + test + 'tst'
try:
subprocess.run(testCommand, stdin=None, stdout=None, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(codeDirectory, ':', cpe)
def testResults_04(codeDirectory):
#assemble the files to .hack machine language
command = '../../nand2tetris/tools/Assembler.sh ' + codeDirectory + '/fill/Fill.asm'
try:
subprocess.run(command, stdin=None, stdout=None, timeout=0.5, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(codeDirectory, ':', cpe, 'while trying to assemble')
#fill must be run manually in CPU Emulator as it is a visual inspection
command = '../../nand2tetris/tools/Assembler.sh ' + codeDirectory + '/mult/Mult.asm'
try:
subprocess.run(command, stdin=None, stdout=None, timeout=0.5, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(codeDirectory, ':', cpe, 'while trying to assemble')
#launch the CPUEmulator
command = '../../nand2tetris/tools/CPUEmulator.sh ' + codeDirectory + '/mult/Mult.tst'
try:
subprocess.run(command, stdin=None, stdout=None, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(codeDirectory, ':', cpe, 'while trying to test')
def compareLines(expectedFilePath, resultFilePath):
#print('\n comparelines\n ', str(expectedFilePath), '\n ', str(resultFilePath))
expected = open(str(expectedFilePath), 'r')
result = open(str(resultFilePath), 'r')
grade = True
expectedLine = 'k'
lineNumber = 0
while len(expectedLine) > 0:
expectedLine = expected.readline().strip()
resultLine = result.readline().strip()
if expectedLine != resultLine:
grade = False
print('bad comparison', str(expectedFilePath.stem), 'line', lineNumber, expectedLine, resultLine)
break
else:
lineNumber += 1
expected.close()
result.close()
if (grade):
print(' good:' , str(expectedFilePath.stem))
def compare_06(codeDirectory, checkfilesDir):
resultFilePath = codeDirectory / 'add/Add.hack'
expectedFilePath = checkfilesDir / 'Add.hack'
try:
compareLines(expectedFilePath, resultFilePath)
except Exception as error:
print(codeDirectory, ':', error, 'while trying to compare student program')
resultFilePath = codeDirectory / 'max/Max.hack'
expectedFilePath = checkfilesDir / 'Max.hack'
try:
compareLines(expectedFilePath, resultFilePath)
except Exception as error:
print(codeDirectory, ':', error, 'while trying to compare student program')
resultFilePath = codeDirectory / 'rect/Rect.hack'
expectedFilePath = checkfilesDir / 'Rect.hack'
try:
compareLines(expectedFilePath, resultFilePath)
except Exception as error:
print(codeDirectory, ':', error, 'while trying to compare student program')
resultFilePath = codeDirectory / 'pong/Pong.hack'
expectedFilePath = checkfilesDir / 'Pong.hack'
try:
compareLines(expectedFilePath, resultFilePath)
except Exception as error:
print(codeDirectory, ':', error, 'while trying to compare student program')
print('\n', codeDirectory.name, 'done\n\n\n')
def assemble_06(codeDirectory, checkfilesDir):
command = codeDirectory / 'Assembler.py'
command = str(command)
argDir = codeDirectory / 'add/Add.asm'
arg = str(argDir)
try:
subprocess.call(['python3', command, arg])
except subprocess.CalledProcessError as cpe:
print(codeDirectory, ':', cpe, 'while trying to run student program')
argDir = codeDirectory / 'max/Max.asm'
arg = str(argDir)
try:
subprocess.call(['python3', command, arg])
except subprocess.CalledProcessError as cpe:
print(codeDirectory, ':', cpe, 'while trying to run student program')
argDir = codeDirectory / 'rect/rect.asm'
arg = str(argDir)
try:
subprocess.call(['python3', command, arg])
except subprocess.CalledProcessError as cpe:
print(codeDirectory, ':', cpe, 'while trying to run student program')
argDir = codeDirectory / 'pong/Pong.asm'
arg = str(argDir)
try:
subprocess.call(['python3', command, arg])
except subprocess.CalledProcessError as cpe:
print(codeDirectory, ':', cpe, 'while trying to run student program')
def testResults_07(studentPath):
for eachtarget in targets07:
if PLATFORM == NIX:
command = 'python3 ' + studentPath.name + '/VMtoMnemonics.py ' + studentPath.name + '/' + eachtarget #*NIX platform
else:
command = 'python ' + studentPath.name + '\\VMtoMnemonics.py ' + studentPath.name + '\\' + eachtarget #WIN platform
try:
subprocess.run(command, stdin=None, stdout=None, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(studentPath.name, ':', cpe, 'while compiling', eachtarget )
for eachtest in tests07:
if PLATFORM == NIX:
command = 'tools/CPUEmulator.sh ' + studentPath.name + eachtest + '.tst' #*NIX platform
else:
command = 'tools\\CPUEmulator.bat ' + studentPath.name + eachtest + '.tst' #WIN platform
try:
subprocess.run(command, stdin=None, stdout=None, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(studentPath.name, ':', cpe, 'while running', eachtest )
print('\n', studentPath.name, 'done\n\n\n')
def testResults_08(studentPath):
for eachtarget in targets08:
if PLATFORM == NIX:
command = 'python3 ' + studentPath.name + '/VMtoMnemonics.py ' + studentPath.name + '/' + eachtarget #*NIX platform
else:
command = 'python ' + studentPath.name + '\\VMtoMnemonics.py ' + studentPath.name + '\\' + eachtarget #WIN platform
try:
subprocess.run(command, stdin=None, stdout=None, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(studentPath.name, ':', cpe, 'while compiling', eachtarget )
for eachtest in tests08:
if PLATFORM == NIX:
command = 'tools/CPUEmulator.sh ' + studentPath.name + eachtest + '.tst' #*NIX platform
else:
command = 'tools\\CPUEmulator.bat ' + studentPath.name + eachtest + '.tst' #WIN platform
try:
subprocess.run(command, stdin=None, stdout=None, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(studentPath.name, ':', cpe, 'while running', eachtest )
print('\n', studentPath.name, 'done\n\n\n')
def testResults10(studentPath, checkFilesPath):
#move existing .xml files
for eachtarget in targets10:
if PLATFORM == NIX:
resultDirectory = studentPath.name + '/' + eachtarget
newDir = resultDirectory + '/' + 'oldXML'
else:
resultDirectory = studentPath.name + '\\' + eachtarget #WIN platform
newDir = resultDirectory + '\\' + 'oldXML'
resultDirectory = Path(resultDirectory)
newDirPath = Path(newDir)
if not newDirPath.exists():
os.mkdir(newDir)
for file in resultDirectory.iterdir():
if file.suffix == '.xml':
if PLATFORM == NIX:
newName = newDir + '/' + file.name
else:
newName = newDir + '\\' + file.name #WIN platform
shutil.move( str(file) , str(newName) )
print('\n xml moves complete for ' + studentPath.name)
#run the project code
for eachtarget in targets10:
if PLATFORM == NIX:
command = 'python3 ' + studentPath.name + '/JackAnalyzer.py ' + studentPath.name + '/' + eachtarget #*NIX platform
else:
command = 'python ' + studentPath.name + '\\JackAnalyzer.py ' + studentPath.name + '\\' + eachtarget #WIN platform
try:
subprocess.run(command, stdin=None, stdout=None, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(str(studentPath), ':', cpe, 'while compiling', eachtarget )
print('\n Translations complete for ' + studentPath.name)
for eachtarget in targets10:
resultDirectory = studentPath.name + '/' + eachtarget
resultDirectory = Path(resultDirectory)
expectedDirectory = checkFilesPath
print('\n Comparing', eachtarget)
for file in resultDirectory.iterdir():
found = False
if file.suffix == '.xml':
found = True
result = open(str(file), 'r')
expectedFileName = str( expectedDirectory / eachtarget / file.name )
expected = open(expectedFileName, 'r')
lineNumber = 0
expectedLine = expected.readline().strip()
resultLine = result.readline().strip()
while resultLine and expectedLine:
lineNumber += 1
if expectedLine != resultLine:
print('bad comparison', str(file.stem), 'your results file line', lineNumber)
expected.close()
result.close()
return
expectedLine = expected.readline().strip()
resultLine = result.readline().strip()
expected.close()
result.close()
print(' good:' , str(file.stem))
            elif file.is_dir():
found = True
if not found:
print(' No file to translate')
print('\n', studentPath.name, 'done\n\n\n')
def testMilestone11(studentPath, checkFilesPath):
#move existing .xml files
for eachtarget in targets11:
if PLATFORM == NIX:
resultDirectory = studentPath.name + '/' + eachtarget
newDir = resultDirectory + '/' + 'oldFiles'
else:
resultDirectory = studentPath.name + '\\' + eachtarget #WIN platform
newDir = resultDirectory + '\\' + 'oldFiles'
resultDirectory = Path(resultDirectory)
newDirPath = Path(newDir)
if not newDirPath.exists():
os.mkdir(newDir)
for file in resultDirectory.iterdir():
if file.suffix == '.xml':
if PLATFORM == NIX:
newName = newDir + '/' + file.name
else:
newName = newDir + '\\' + file.name #WIN platform
shutil.move( str(file) , str(newName) )
print('\n xml moves complete for ' + studentPath.name)
#run the project code
for eachtarget in targets11:
if PLATFORM == NIX:
command = 'python3 ' + studentPath.name + '/JackAnalyzer.py ' + studentPath.name + '/' + eachtarget #*NIX platform
else:
command = 'python ' + studentPath.name + '\\JackAnalyzer.py ' + studentPath.name + '\\' + eachtarget #WIN platform
try:
subprocess.run(command, stdin=None, stdout=None, shell=True, check=True)
except subprocess.CalledProcessError as cpe:
print(str(studentPath), ':', cpe, 'while compiling', eachtarget )
print('\n Translations complete for ' + studentPath.name)
for eachtarget in targets11:
resultDirectory = studentPath.name + '/' + eachtarget
resultDirectory = Path(resultDirectory)
expectedDirectory = checkFilesPath
print('\n Comparing', eachtarget)
for file in resultDirectory.iterdir():
found = False
if file.suffix == '.xml' or file.suffix == '.vm':
found = True
result = open(str(file), 'r')
expectedFileName = str( expectedDirectory / eachtarget / file.name )
expected = open(expectedFileName, 'r')
lineNumber = 0
expectedLine = expected.readline().strip()
resultLine = result.readline().strip()
while resultLine and expectedLine:
lineNumber += 1
if expectedLine != resultLine:
print('bad comparison', str(file.stem), 'your results file line', lineNumber)
expected.close()
result.close()
return
expectedLine = expected.readline().strip()
resultLine = result.readline().strip()
expected.close()
result.close()
print(' good:' , str(file.name))
            elif file.is_dir():
found = True
if not found:
print(' No file to translate')
print('\n', studentPath.name, 'done\n\n\n')
#############################################################
#############################################################
#############################################################
if __name__ == '__main__':
projectNum = '01'
tests01 = [ 'And.', 'And16.', 'DMux.', 'DMux4Way.', 'DMux8Way.', 'Mux.', 'Mux4Way16.',
'Mux8Way16.', 'Mux16.', 'Not.', 'Not16.', 'Or.', 'Or8Way.', 'Or16.', 'Xor.']
tests02 = [ 'Add16.', 'ALU-nostat.', 'ALU.', 'FullAdder.', 'HalfAdder.', 'inc16.']
tests03 = { 'a':['Bit.', 'PC.', 'RAM8.', 'RAM64.', 'Register.'],
'b':['RAM4K.', 'RAM16K.', 'RAM512.'] }
tests05 = [ 'ComputerAdd-external.', 'ComputerAdd.', 'ComputerMax-external.', 'ComputerMax.',
'ComputerRect-external.', 'ComputerRect.', 'CPU-external.', 'CPU.'] # , 'Memory.'
tests07 = [ '/StackArithmetic/SimpleAdd/SimpleAdd', '/StackArithmetic/StackTest/StackTest',
'/MemoryAccess/BasicTest/BasicTest', '/MemoryAccess/PointerTest/PointerTest',
'/MemoryAccess/StaticTest/StaticTest']
targets07 = [ 'StackArithmetic/SimpleAdd', 'StackArithmetic/StackTest',
'MemoryAccess/BasicTest', 'MemoryAccess/PointerTest',
'MemoryAccess/StaticTest']
tests08 = [ '/ProgramFlow/BasicLoop/BasicLoop', '/ProgramFlow/FibonacciSeries/FibonacciSeries',
'/FunctionCalls/SimpleFunction/SimpleFunction', '/FunctionCalls/NestedCall/NestedCall',
'/FunctionCalls/FibonacciElement/FibonacciElement', '/FunctionCalls/StaticsTest/StaticsTest']
targets08 = [ 'ProgramFlow/BasicLoop', 'ProgramFlow/FibonacciSeries',
'FunctionCalls/SimpleFunction', 'FunctionCalls/NestedCall',
'FunctionCalls/FibonacciElement', 'FunctionCalls/StaticsTest']
targets10 = ['ExpressionlessSquare', 'ArrayTest', 'Square']
targets11 = ['Seven', 'ConvertToBin', 'Square', 'Average', 'Pong', 'ComplexArrays']
tests = {'tests01': tests01, 'tests02': tests02, 'tests03':tests03, 'tests05': tests05}
singleDepth = ['tests01', 'tests02', 'tests05']
#start with directory this file is in
startingDirectory = Path.cwd()
import argparse
ap = argparse.ArgumentParser(prog='n2tGradingServer.py')
ap.add_argument( '--01', action='store_const', const='tests01', dest="project", help='which project is being graded')
ap.add_argument( '--02', action='store_const', const='tests02', dest="project", help='which project is being graded')
ap.add_argument( '--03', action='store_const', const='tests03', dest="project", help='which project is being graded')
ap.add_argument( '--04', action='store_const', const='tests04', dest="project", help='which project is being graded')
ap.add_argument( '--05', action='store_const', const='tests05', dest="project", help='which project is being graded')
ap.add_argument( '--06', action='store_const', const='tests06', dest="project", help='which project is being graded')
ap.add_argument( '--07', action='store_const', const='tests07', dest="project", help='which project is being graded')
ap.add_argument( '--08', action='store_const', const='tests08', dest="project", help='which project is being graded')
ap.add_argument( '--10', action='store_const', const='tests10', dest="project", help='which project is being graded')
ap.add_argument( '--11', action='store_const', const='milestone11', dest="project", help='which project is being graded')
ap.add_argument( '--w', action='store_const', const='windows', dest="platform", help='which project is being graded')
args = ap.parse_args()
whichProject = args.project
platformToggle = args.platform
if platformToggle == 'windows':
PLATFORM = WIN
else:
PLATFORM = NIX
checkFilesPath = startingDirectory / 'zzzzCheckfiles' #for 06, 10 & 11
for studentPath in startingDirectory.iterdir():
if studentPath.is_dir():
print('\n\ndirectory:', studentPath.name)
if ('zzzz' not in studentPath.name) and ('tools' not in studentPath.name):
if whichProject in singleDepth:
testResults_01_02_05(studentPath.name, whichProject)
elif whichProject == 'tests03':
testResults_03(studentPath.name)
elif whichProject == 'tests04':
testResults_04(studentPath.name)
elif whichProject == 'tests06':
assemble_06(studentPath, checkFilesPath)
compare_06(studentPath, checkFilesPath)
elif whichProject == 'tests07':
testResults_07(studentPath)
elif whichProject == 'tests08':
testResults_08(studentPath)
elif whichProject == 'tests10':
testResults10(studentPath, checkFilesPath)
elif whichProject == 'milestone11':
testMilestone11(studentPath, checkFilesPath)
else:
print('projectName mismatch for', studentPath.name, whichProject)
else:
print(studentPath.name, 'skipped for zzzz')
|
[
"hardt.alex@gmail.com"
] |
hardt.alex@gmail.com
|
8d3d2be82260947a4a95d2a84f66d50d504dd61c
|
4e01416394a229e2afeede99493d282f4259b1a1
|
/examples/optimal_transport/crowd/converging_corridor.py
|
cd4e833c29abd66373a46cc9a712f89ec1e8d84f
|
[] |
no_license
|
sd-ot/pysdot
|
b2b2abb53e4e87d53e0bb3734c1624b1fc5e5f3b
|
8d16000c36deb9ab1aa98b819179741b7b65409d
|
refs/heads/master
| 2023-07-20T07:15:58.469620
| 2023-07-14T13:42:48
| 2023-07-14T13:42:48
| 176,300,757
| 4
| 2
| null | 2020-08-02T06:19:26
| 2019-03-18T14:13:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,168
|
py
|
from pysdot.domain_types import ConvexPolyhedraAssembly
from pysdot.radial_funcs import RadialFuncInBall
from pysdot import OptimalTransport
import numpy as np
# constants
for n in [ 160 ]: # 20, 40, 80,
directory = "results/converging_corridor_{}".format( n )
t = np.linspace(-2,2,n)
h = 2./n
x,y = np.meshgrid(t,t)
positions = np.hstack((np.reshape(x,(n*n,1)),
np.reshape(y,(n*n,1))))
R2 = positions[ :, 0 ]**2 + positions[ :, 1 ]**2
positions = positions[ R2 <= 4 ]
positions = positions[ positions[:,0] + positions[:,1] > 1.0 / n ]
positions = positions[ - positions[:,0] + positions[:,1] > 1.0 / n ]
N = positions.shape[ 0 ]
rho0 = 1 / np.pi
mass = 0.25 * rho0 * np.pi * 4 / N
target_radius = ( mass / np.pi )**0.5
# iterations
weights = np.ones( positions.shape[ 0 ] ) * target_radius**2
domain = ConvexPolyhedraAssembly()
domain.add_convex_polyhedron([
[ 0, 0, +1, -1 ],
[ 9, 9, 0, +1 ],
[ -9, 9, -1, -1 ],
])
domain.display_boundaries_vtk( directory + "/bounds.vtk" )
color_values = 0.5 * np.linalg.norm( positions, axis=1, keepdims=True, ord=2 )
color_values = ( color_values - np.min( color_values ) ) / ( np.max( color_values ) - np.min( color_values ) )
ot = OptimalTransport(domain, RadialFuncInBall())
ot.set_weights( weights )
ot.set_masses( np.ones( positions.shape[ 0 ] ) * mass )
nb_timesteps = int( 3 / target_radius )
for i in range( nb_timesteps ):
# change positions
positions -= 0.4 * target_radius / np.linalg.norm( positions, axis=1, keepdims=True, ord=2 ) * positions
ot.set_positions(positions)
# optimal weights
ot.adjust_weights()
# display
d = int( n / 5 )
if i % d == 0:
# ot.display_asy( directory + "/pd_{:03}.asy".format( int( i / d ) ), "in_ball(weight**0.5)", positions, weights, domain, values = color_values, linewidth=0.002 )
ot.display_vtk( directory + "/pd_{:03}.vtk".format( int( i / d ) ) )
# update positions
positions = ot.get_centroids()
|
[
"hugal.leclerc@gmail.com"
] |
hugal.leclerc@gmail.com
|
634dcc3314cc9dc2e05696d565b2107bde5d47b9
|
758ac6a75ca021d8595975be0d41fa50c7bfdf88
|
/snownlp/recipes.py
|
f551ff43cf6bf604199eb3d92f46e6401ca365bd
|
[] |
no_license
|
Henry2012/recipes
|
5a04197a41e94a638c20350b3e0ec6d23702808d
|
fe61d1bd57f922a41a816939e5ef2e9abd7eb6e9
|
refs/heads/master
| 2020-04-21T14:16:51.034571
| 2014-07-01T08:09:35
| 2014-07-01T08:09:35
| 18,829,830
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,541
|
py
|
#! /usr/bin/python
#-*- coding:utf-8 -*-
"""
Author: Qiqun Han
File: snownlp.recipes.py
Description: example usage of SnowNLP (keywords, summary, sentences, similarity)
Creation: 2013-12-15
Revision: 2013-12-15
"""
from snownlp import SnowNLP
text = u"""
宁波华翔:海外资产拐点显现 并购协同助力国内增长
公司是国内优秀汽车内饰供应商,通过合资与并购实现高速增长。公司与世界知名汽车零部件公司合资,产品质量、市场地位处于国内领先水平;通过并购快速开拓新客户与新产品,营收规模高速提升;下游客户包括南北大众、宝马丰田等主流合资品牌,并通过海外并购进入德国宝马奔驰、美国通用等世界汽车商海外配套体系,成长空间拓展至全球配套市场。
核心资产盈利能力强,随下游客户共同成长。11-12 年由于参股公司调整等非经常因素影响,净利润出现大幅下滑。但公司核心资产盈利能力优异,随整车客户完成全国布局,通过合资并购开拓新客户与新产品,非经常性因素影响消除后,业绩增速将恢复到20%左右。
海外并购拓展公司产品种类和客户资源,为未来发展奠定基础。公司先后收购德国Sellner 和HIB 公司,借海外公司技术实力和客户资源提升内饰产品的品质,并进入德国奔驰宝马、美国通用配套体系。同时,公司参股德国Helbako 30%股权,进军汽车电子领域,并合资在国内设立生产基地,开拓国内汽车电子市场,为未来准备新的增长空间。
欧洲车市向好,海外资产有望迎来拐点。公司通过合资收购成长,整合经验丰富,海外资产整合成功是大概率事件。海外资产在木内饰领域竞争力强,12 年收入近20 亿,亏损4000 多万,恢复正常有望贡献1-2 亿盈利,弹性巨大。近期欧洲车市现向好迹象,海外资产改善将大幅提升业绩。
"""
# s = SnowNLP(text)
# for each in s.keywords(5):
#     print(each)
# print("-" * 50)
# for each in s.summary(5):
#     print(each)
# print("-" * 50)
# for each in s.sentences:
#     print(each)
#
# print(s.tf)
# print(s.idf)
#===============================================================================
# Similarity module
#===============================================================================
s = SnowNLP([[u'创业', u'板', u'暴跌', u'4.37%', u'现', u'巨 阴', u'断头铡']])
print(s.sim([u'创业', u'板', u'暴跌', u'4.37%', u'现', u'巨 阴', u'断头铡']))
|
[
"qiqun.h@everstring.net"
] |
qiqun.h@everstring.net
|
09651e680a7caf268633806dca07f6e0d52ed6e5
|
993d0b2a0f0e439be5aec2855f5da5f33fe78d44
|
/Universal_Integrated_API/jc/rsync_tools.py
|
03c2699407e4c73e5229593d6ef02a76723a076e
|
[] |
no_license
|
cyril-wx/universal_integrated_api
|
e50fb72f573f30f337d5972e50a3750553a0bc26
|
e2a3f8cd5b3bbf6d043f1b75bdefced1a5b2f594
|
refs/heads/master
| 2022-11-07T07:56:52.813987
| 2020-01-05T10:39:37
| 2020-01-05T10:39:37
| 276,130,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,488
|
py
|
#!/usr/bin/python
# -*- coding:UTF-8 -*-
#********************************************** #
# Master and Slave rsync Tools #
# AutoPanic master/slave server sync tool       #
#---------------------------------------------- #
# @Author: Cyril #
# @Mail: 848873227@qq.com #
# @Create: 2019-06-08 #
# @Modify: 2019-06-10 #
# @Tips: #
#********************************************** #
from jc import utils as jt
from jc import csv_rw as jcsv
import os
import re
import sys
class SyncTool:
#logger = logging.getLogger("TEST").setLevel(logging.DEBUG)
    # Use the default Logger configuration
# logging.basicConfig(handlers=[logging.FileHandler("jc_utils.log", encoding="utf-8")], filemode="w", format="[%(asctime)s] %(levelname)s: [%(funcName)s]: %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.DEBUG)
    ip_pool = []  # 2-D list: one row per station [Station, IP, Stats, role]
conf_path = "/tmp/autopanic_ips_stats.csv"
tmp_csv_file = "/tmp/1.csv"
origin_data=[
["Station", "IP", "Stats", "role"],
["s1", "172.21.156.46", "Online", "master"],
["s2", "172.21.204.237", "Online", "slave"],
["s3", "172.21.204.238", "Online", "slave"],
["s4", "172.21.204.239", "Online", "slave"],
]
def __init__(self, conf_path=conf_path):
"""
        # Initialize
        :param conf_path: config file holding the IPs of every machine in the cluster, stored in CSV format
"""
        # set up a log handler
self.mlog = jt.my_logger("jc_rsync_ptool")
try:
self.ip_pool = jcsv.readCSVFile(conf_path)
self.ip_pool.remove(self.ip_pool[0])
conf_parent_path = os.path.split(conf_path)[0]
except Exception as e:
self.mlog.warning("%s initializing failed." %__name__)
self.mlog.exception(e)
else:
self.mlog.info("%s initializing successd." %__name__)
# def run_IP_Pool(self):
# pass
'''
def setIPStats(self, IP, Stats="Online"):
"""
# Add/Update IPStats to local IP_Pool
# 请勿直接调用此方法
:param IP:
:param Stats: Online/Offline (str)
:return: True/False
"""
print (self.ip_pool)
'''
def fullSync(self, remoteIP, syncFileOrDir, reverse=False, remoteUser="gdlocal", remotePWD="gdlocal"):
"""
# Full synchronization
        # Full sync between two stations (older versions never overwrite newer files)
        :param remoteIP: IP of the remote station; must be given explicitly
        :param syncFileOrDir: file or directory to synchronize
        :param reverse: whether to sync in reverse, True/False
        #   False: forward sync, overwriting the remote files directly
        #   True: reverse sync, i.e. download the remote file to the local temp file /tmp/1.csv
        :param remoteUser: username on the station being synced
        :param remotePWD: password on the station being synced
:return: True/False
"""
        if not re.match(r"\d+\.\d+\.\d+\.\d+", remoteIP):
            self.mlog.exception("'remoteIP' has an invalid format.")
            exit(1)
(dir, filename) = os.path.split(syncFileOrDir)
        ## -u: during sync, older versions do not overwrite newer ones
if reverse:
sync_cmd = "/usr/bin/rsync -avu %s@%s://%s %s" %( remoteUser, remoteIP, syncFileOrDir, self.tmp_csv_file)
else:
sync_cmd = "/usr/bin/rsync -avu %s %s@%s://%s" % (syncFileOrDir, remoteUser, remoteIP, dir)
self.mlog.debug("sync_cmd=%s" % sync_cmd)
cmd = """
expect << EOF
set timeout 3
spawn %s
expect {
-re "Are you sure you want to continue connecting (yes/no)?" { send "yes\r"; exp_continue }
-re "Password:" { send "%s\r"; exp_continue }
-re "total size is" { exit 0}
timeout {
send_user "Timeout...exit.\r" ;
exit 1
}
eof {
send_user "EOF finish.\r" ;
exit 2
}
}
EOF
""" %(sync_cmd, remotePWD)
(res, rev) = jt.readCMD(cmd, True)
if res == 0:
self.mlog.info("Get remote file successul. [IP:%s]" % remoteIP)
else: ## rsync 有可能失败,尝试重试
(res, rev) = jt.readCMD(cmd, True)
if res != 0:
self.mlog.warning("Get remote file failed. [IP:%s], exiting..." % remoteIP)
return False
else:
self.mlog.info("Retry get remote file successul. [IP:%s]" % remoteIP)
return True
def increSync(self, remoteIP, syncFileOrDir, remoteUser="gdlocal", remotePWD="gdlocal"):
"""
# Incremental synchronization
        # Incremental sync between two stations (one of them is this machine)
        # Tips: intended for single-file incremental sync; may fail if syncFileOrDir is a directory
        :param remoteIP: IP of the remote station; must be given explicitly
        :param syncFileOrDir: file or directory to synchronize
        :param reverse: whether to sync in reverse, True/False; defaults to forward sync (False)
        :param remoteUser: username on the station being synced
        :param remotePWD: password on the station being synced
:return: True/False
"""
if not os.path.isfile(syncFileOrDir):
self.mlog.warning("'%s' should be a exist file path." %syncFileOrDir)
# return False
# 反向同步,即下载远程文件到本地. 如果失败则退出。
if not self.fullSync(remoteIP, syncFileOrDir, True, remoteUser, remotePWD):
self.mlog.exception("Failed: Can't get remote file from [IP:%s]." %remoteIP )
return False
remote_data = jcsv.readCSVFile(self.tmp_csv_file)
local_data = jcsv.readCSVFile(syncFileOrDir)
jt.readCMD(["rm -rf %s" %self.tmp_csv_file], True )
        ## Compare local data with remote data, merge duplicates, and write out the new data
        if remote_data and local_data and remote_data == local_data:
            ## If the two data sets are identical, skip writing and skip the remote sync
return True
else:
new_data = self.mergeLists(remote_data, local_data)
            # Clear the original data table
with open(syncFileOrDir, "w") as f:
f.write("")
jcsv.writeCSVFile(syncFileOrDir, new_data)
if self.fullSync(remoteIP, syncFileOrDir, False, remoteUser, remotePWD):
return True
return False
def increSyncAll(self, syncFileOrDir="", remoteUser="gdlocal", remotePWD="gdlocal", remoteIP_list=[]):
        if not remoteIP_list or not isinstance(remoteIP_list, list):
            print("remoteIP_list should be a non-empty list.")
return None
self.mlog.info("remote IP list:%s" %(str(remoteIP_list)))
failIP = []
for remoteIP in remoteIP_list:
if not self.increSync(remoteIP, syncFileOrDir, remoteUser, remotePWD):
failIP.append(remoteIP)
self.mlog.info("increSyncAll failed IPs:%s" %(str(failIP)))
return failIP
def fullSyncAll(self, syncFileOrDir="",remoteUser="gdlocal", remotePWD="gdlocal", remoteIP_list=[]):
        if not remoteIP_list or not isinstance(remoteIP_list, list):
            print("remoteIP_list should be a non-empty list.")
return None
self.mlog.info("remote IP list:%s" % (str(remoteIP_list)))
failIP = []
for remoteIP in remoteIP_list:
if not self.fullSync(remoteIP, syncFileOrDir, False, remoteUser, remotePWD):
failIP.append(remoteIP)
self.mlog.warning("increSyncAll failed IPs:%s" % (str(failIP)))
return failIP
def mergeLists(self, *args):
"""
        # Merge all the given lists, automatically removing duplicate rows
        :param args: any number of 2-D lists to merge
        :return: merged list of unique rows, sorted by first element
"""
if not args:
return []
merged_list = set()
for item in args:
if not item:
continue
for i_list in item:
try:
merged_list.add(tuple(i_list))
except Exception as e:
self.mlog.warning("item-%s added failed." % str(i_list))
self.mlog.exception(e)
continue
return sorted(merged_list, key=lambda x: x[0], reverse=False)
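
    # Example of the merge semantics above (values are illustrative):
    #
    #   st.mergeLists([["1", "a"]], [["1", "a"], ["2", "b"]])
    #   # -> [("1", "a"), ("2", "b")]  rows are deduplicated as tuples and
    #   #    sorted by their first column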
def main():
    """
    ## Command-line entry point for this module
    :return:
    """
    from utils import my_logger
    mlog = my_logger("jc_rsync_tools")
    module = sys.modules[__name__]
    # getattr() returns the named attribute of an object; here the first
    # command-line argument selects which module-level function to call.
    # sys.argv holds the command-line arguments as a list;
    # sys.argv[0] is the name of the current module.
try:
func = getattr(module, sys.argv[1])
except Exception as e:
mlog.exception(e)
else:
        args = sys.argv[2:]
mlog.debug("args = %s" %args)
func(*args)
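
# Dispatch sketch: assuming this file is saved as jc_rsync_tools.py (the name is
# inferred from the logger above, not confirmed), a module-level function could
# be invoked from the shell as:
#
#   python jc_rsync_tools.py main arg1 arg2
#
# Note that main() looks attributes up on the module object, so SyncTool
# methods are not directly dispatchable this way.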
if __name__ == "__main__":
ipp = SyncTool()
ipp.mlog.info("This is only test para.")
#print(ipp.increSync("172.21.204.238", "/tmp/autopanic_ips_stats.csv", remoteUser="gdlocal", remotePWD="gdlocal"))
#print(ipp.increSyncAll( syncFileOrDir="/tmp/autopanic_ips_stats.csv", remoteUser="gdlocal", remotePWD="gdlocal", remoteIP_list=["172.21.204.237", "172.21.204.238","172.21.204.239"]))
#logging.critical(ipp.fullSyncAll(syncFileOrDir="/tmp/autopanic_ips_stats.csv", remoteUser="gdlocal", remotePWD="gdlocal", remoteIP_list=["172.21.204.237", "172.21.204.238", "172.21.204.239"]))
else:
#main()
pass
|
[
"848873227@qq.com"
] |
848873227@qq.com
|
90760b20bcf6f696bcbc1a9d1ad50e5742fd806b
|
ef2bc6b1ba9be1af93e1b96f62b25f5b9866e187
|
/backend/posts/migrations/0003_post_category.py
|
fab13cced492be5b42468bd060f7fc69a6b1df28
|
[] |
no_license
|
fraanaalonso/DRF-REACT
|
9c4ad9af2a0ddbbb31297aa401f9f4785eeb1001
|
36c33e1f2f05d1ccde3bef7c3771ecbc6a2462ef
|
refs/heads/main
| 2023-05-02T03:39:01.359618
| 2021-05-20T16:16:10
| 2021-05-20T16:16:10
| 361,467,387
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
# Generated by Django 3.1.8 on 2021-04-22 17:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_category'),
]
operations = [
migrations.AddField(
model_name='post',
name='category',
field=models.ForeignKey(default='ad0a16a3-9976-4c6e-8a91-759140d1fac1', on_delete=django.db.models.deletion.CASCADE, related_name='post_category', to='posts.category'),
preserve_default=False,
),
]
|
[
"fraloal97@gmail.com"
] |
fraloal97@gmail.com
|
78a37497426627418e658256b008de7c350e5025
|
f6f4e6df36e58207fd72276ebd2ee430bd301d03
|
/数据结构/字典.py
|
4e87bd464683bf04b94540d3f1d41969639626ec
|
[] |
no_license
|
aria4013/pythongo
|
1734e16f046deeb79fa231702ff711c092dafbb3
|
9b3d4bc6ad4c64cb1a55264b12d54297262729a2
|
refs/heads/main
| 2023-04-07T05:25:01.534514
| 2021-04-19T09:59:06
| 2021-04-19T09:59:06
| 359,410,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
tel = {'jack': 4098, 'sape': 4139}
tel['guido'] = 4127          # add a new key/value pair
print(tel)
tel['jack']                  # look up a value by key
del tel['sape']              # remove an entry
tel['irv'] = 4127
print(tel)
list(tel.keys())             # all keys, in insertion order
sorted(tel.keys())           # all keys, sorted
'guido' in tel               # membership test on keys
'jack' not in tel
dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])   # build from key/value pairs
{x: x**2 for x in (2, 4, 6)}                              # dict comprehension -> {2: 4, 4: 16, 6: 36}
dict(sape=4139, guido=4127, jack=4098)                    # build from keyword arguments
|
[
"aria4013@outlook.com"
] |
aria4013@outlook.com
|
7926073ef3fe64f8c68f5bcc7891367d0b7c72fd
|
b7fbd979143a35d2dc61e4950aa23219febbeb37
|
/kibitzr_email/exceptions.py
|
16ee63d4db377446967bd3effa92b19462fec61e
|
[
"MIT"
] |
permissive
|
kibitzr/kibitzr-email
|
f5caef7890564be73b2b0b0aad46652a2c5b75d1
|
36e0b2fdd3ff1547d55f4f1aa52f05d4b641e140
|
refs/heads/master
| 2020-03-11T21:51:21.269147
| 2018-07-07T00:41:45
| 2018-07-07T00:41:45
| 130,276,474
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
class UnexpectedResponse(RuntimeError):
    """Raised when a server reply does not match what was expected."""


class NetworkOutage(RuntimeError):
    """Raised when the network or remote server is unreachable."""
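
# Hypothetical usage sketch (check_mailbox and retry_later are illustrative
# names, not part of this package):
#
#   try:
#       check_mailbox()
#   except NetworkOutage:
#       retry_later()
#   except UnexpectedResponse:
#       raise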
|
[
"peterdemin@gmail.com"
] |
peterdemin@gmail.com
|
c4879c4d4fce18ab51478e8f15e2633a82985302
|
83b723b1e95187f758fccf0eb584686db53ee8c0
|
/employee_project/employee_register/views.py
|
97a5df702f6e09aba7558cd5884b8cc7e45d4d6b
|
[] |
no_license
|
Anikcse18/officeManagementwebproject
|
c3b6cfb2ef201d8da859e0897e5e976b2621d642
|
1998965cf80a203007db5aa3c84867aac2d5f755
|
refs/heads/master
| 2022-12-08T14:35:24.914465
| 2020-09-08T15:01:39
| 2020-09-08T15:01:39
| 293,844,284
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
from django.shortcuts import render,redirect
from .forms import EmployeeForm
from .models import Employee
# Create your views here.
def employee_list(request):
context = {'employee_list': Employee.objects.all()}
return render(request, "employee_register/employee_list.html", context)
def employee_form(request, id=0):
if request.method == "GET":
if id == 0:
form = EmployeeForm()
else:
employee = Employee.objects.get(pk=id)
form = EmployeeForm(instance=employee)
return render(request, "employee_register/employee_form.html", {'form': form})
else:
if id == 0:
form = EmployeeForm(request.POST)
else:
employee = Employee.objects.get(pk=id)
            form = EmployeeForm(request.POST, instance=employee)
if form.is_valid():
form.save()
return redirect('/employee/list')
def employee_delete(request,id):
employee = Employee.objects.get(pk=id)
employee.delete()
return redirect('/employee/list')
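
# For reference, a urls.py that would match the redirects above might look like
# this (a sketch only; the project's actual routing is not shown here):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('list/', views.employee_list),
#       path('', views.employee_form),                    # insert
#       path('<int:id>/', views.employee_form),           # update
#       path('delete/<int:id>/', views.employee_delete),
#   ]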
|
[
"istiakjavedanik@gmail.com"
] |
istiakjavedanik@gmail.com
|
b319a08688d9b352d9ea48f350d2492e1485fe55
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/totally_workwear_au.py
|
e5341eb5c6c1a091ebfaf5469536b0c7d7571898
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
from scrapy import Spider
from locations.dict_parser import DictParser
from locations.hours import OpeningHours
class TotallyWorkwearAU(Spider):
name = "totally_workwear_au"
item_attributes = {"brand": "Totally Workwear", "brand_wikidata": "Q119247989"}
allowed_domains = ["www.totallyworkwear.com.au"]
start_urls = ["https://www.totallyworkwear.com.au/api/places"]
def parse(self, response):
for location in response.json():
item = DictParser.parse(location)
item["ref"] = location["path"]
item["lat"] = location["geometry"]["location"]["lat"]
item["lon"] = location["geometry"]["location"]["lng"]
item["phone"] = location["formatted_phone_number"]
item["website"] = "https://www.totallyworkwear.com.au/store/" + location["path"]
item["opening_hours"] = OpeningHours()
item["opening_hours"].add_ranges_from_string(" ".join(location["opening_hours"]["weekday_text"]))
yield item
|
[
"noreply@github.com"
] |
alltheplaces.noreply@github.com
|