blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2–616) | content_id (string, len 40) | detected_licenses (list, len 0–69) | license_type (string, 2 classes) | repo_name (string, len 5–118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, len 2–10.3M) | authors (list, len 1) | author_id (string, len 0–212)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bdf17335b93f83a0479b58d5e3febbf6c730f2de
|
d64f62e9eddb4697755789ae8c963eb7b9211b03
|
/data/plot_minTimeClimb.py
|
432c12857cc15c98896a01caa66e1db7852da25b
|
[] |
no_license
|
naylor-b/om_bench
|
ec5b0115e299aa58dba215320181d8f43a1ed826
|
0de5a76b5b36706b602850b5103efbb5a6cc330c
|
refs/heads/master
| 2020-03-26T12:27:54.776125
| 2018-08-02T22:19:57
| 2018-08-02T22:19:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
"""
Create plots for the Beam problem benchmarks.
"""
from __future__ import print_function
from om_bench.post import post_process, BenchPost
filename = 'minTimeClimb_state_nl_ln_drv.dat'
bp = BenchPost('Min Time Climb')
bp.flagtxt = "Simultaneous Derivatives"
bp.title_driver = "Compute Coloring"
bp.special_plot_driver_on_linear = True
bp.equal_axis = True
bp.post_process(filename)
|
[
"kenneth.t.moore-1@nasa.gov"
] |
kenneth.t.moore-1@nasa.gov
|
3cc126f4d09c3e5d014906ac2d74eb41584d7851
|
d7e5d1820bed236dfbc1d12c61ca7c45e6afec6a
|
/Que.Common.Test/Distributions/ConstantDistributionTests.py
|
7790dee7144b32296620596714066c7df567673c
|
[] |
no_license
|
colgategao/Que
|
94a28c3da6b6f32e8b9074b9b4426037bafb0157
|
4752bd3505b418598d6d93b93c0d926699d2a8e9
|
refs/heads/master
| 2021-01-16T19:59:42.153084
| 2015-01-30T00:21:46
| 2015-01-30T00:21:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
import unittest
class ConstantDistributionTests(unittest.TestCase):
    # renamed so unittest discovery picks these up (test method names must start with "test")
    def test_minimum_parameter(self):
        self.fail("Not implemented")
    def test_minus_parameter(self):
        self.fail("Not implemented")
if __name__ == '__main__':
unittest.main()
|
[
"andy.wanganqi@gmail.com"
] |
andy.wanganqi@gmail.com
|
68804b8d41d37af3f103a453e4453a04f4b2de44
|
911805e6496e0f529708c07d60062f16967f0046
|
/main.py
|
b548d7f3daf46c697bd8fca52f3cdc6dd6ad5ebb
|
[
"MIT"
] |
permissive
|
esemeniuc/CycleGAN-Music-Style-Transfer
|
b370c93280ae19a4a92120ac504370a2a6edf2ae
|
4010fc6311f30e74fb7e73e9d55c23bb8cf71fbf
|
refs/heads/master
| 2020-05-01T18:48:30.386247
| 2019-01-27T19:09:33
| 2019-01-27T19:09:33
| 177,632,340
| 0
| 0
|
MIT
| 2019-03-25T17:22:23
| 2019-03-25T17:22:23
| null |
UTF-8
|
Python
| false
| false
| 5,081
|
py
|
import argparse
import os
import tensorflow as tf
from model import cyclegan
from style_classifier import Classifer
tf.set_random_seed(19)
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ['SGE_GPU']
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dataset_dir', dest='dataset_dir', default='JAZZ2ROCK', help='path of the dataset')
parser.add_argument('--dataset_A_dir', dest='dataset_A_dir', default='JC_J', help='path of the dataset of domain A')
parser.add_argument('--dataset_B_dir', dest='dataset_B_dir', default='JC_C', help='path of the dataset of domain B')
parser.add_argument('--epoch', dest='epoch', type=int, default=100, help='# of epoch')
parser.add_argument('--epoch_step', dest='epoch_step', type=int, default=10, help='# of epoch to decay lr')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=16, help='# images in batch')
parser.add_argument('--train_size', dest='train_size', type=int, default=1e8, help='# images used to train')
parser.add_argument('--load_size', dest='load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--fine_size', dest='fine_size', type=int, default=128, help='then crop to this size')
parser.add_argument('--time_step', dest='time_step', type=int, default=64, help='time step of pianoroll')
parser.add_argument('--pitch_range', dest='pitch_range', type=int, default=84, help='pitch range of pianoroll')
parser.add_argument('--ngf', dest='ngf', type=int, default=64, help='# of gen filters in first conv layer')
parser.add_argument('--ndf', dest='ndf', type=int, default=64, help='# of discri filters in first conv layer')
parser.add_argument('--input_nc', dest='input_nc', type=int, default=1, help='# of input image channels')
parser.add_argument('--output_nc', dest='output_nc', type=int, default=1, help='# of output image channels')
parser.add_argument('--lr', dest='lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--beta1', dest='beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--which_direction', dest='which_direction', default='AtoB', help='AtoB or BtoA')
parser.add_argument('--phase', dest='phase', default='train', help='train, test')
parser.add_argument('--save_freq', dest='save_freq', type=int, default=1000, help='save a model every save_freq iterations')
parser.add_argument('--print_freq', dest='print_freq', type=int, default=100, help='print the debug information every print_freq iterations')
parser.add_argument('--continue_train', dest='continue_train', type=bool, default=False, help='if continue training, load the latest model: 1: true, 0: false')
parser.add_argument('--checkpoint_dir', dest='checkpoint_dir', default='./checkpoint', help='models are saved here')
parser.add_argument('--sample_dir', dest='sample_dir', default='./samples', help='sample are saved here')
parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test sample are saved here')
parser.add_argument('--log_dir', dest='log_dir', default='./log', help='logs are saved here')
parser.add_argument('--L1_lambda', dest='L1_lambda', type=float, default=10.0, help='weight on L1 term in objective')
parser.add_argument('--gamma', dest='gamma', type=float, default=1.0, help='weight of extra discriminators')
parser.add_argument('--use_midi_G', dest='use_midi_G', type=bool, default=False, help='select generator for midinet')
parser.add_argument('--use_midi_D', dest='use_midi_D', type=bool, default=False, help='select discriminator for midinet')
parser.add_argument('--use_lsgan', dest='use_lsgan', type=bool, default=False, help='gan loss defined in lsgan')
parser.add_argument('--max_size', dest='max_size', type=int, default=50, help='max size of image pool, 0 means do not use image pool')
parser.add_argument('--sigma_c', dest='sigma_c', type=float, default=1.0, help='sigma of gaussian noise of classifiers')
parser.add_argument('--sigma_d', dest='sigma_d', type=float, default=1.0, help='sigma of gaussian noise of discriminators')
parser.add_argument('--model', dest='model', default='base', help='three different models, base, partial, full')
parser.add_argument('--type', dest='type', default='cyclegan', help='cyclegan or classifier')
args = parser.parse_args()
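# Editorial note (not in the original): argparse's type=bool is a known pitfall here --
# bool('False') is True, so any non-empty string turns these flags on. A common fix
# (a sketch, with an assumed helper name) is:
#   def str2bool(v):
#       return str(v).lower() in ('true', '1', 'yes')
# and passing type=str2bool for --continue_train, --use_midi_G, --use_midi_D and --use_lsgan.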
def main(_):
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if not os.path.exists(args.sample_dir):
os.makedirs(args.sample_dir)
if not os.path.exists(args.test_dir):
os.makedirs(args.test_dir)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig) as sess:
if args.type == 'cyclegan':
model = cyclegan(sess, args)
model.train(args) if args.phase == 'train' else model.test(args)
if args.type == 'classifier':
classifier = Classifer(sess, args)
classifier.train(args) if args.phase == 'train' else classifier.test(args)
if __name__ == '__main__':
tf.app.run()
|
[
"noreply@github.com"
] |
esemeniuc.noreply@github.com
|
2b9beb0be239d6c0985cc1c591ed3da3f23448e8
|
49bd144b5d0d4e7588cb4ae26f9531fcc8606065
|
/ion/util/testing_utils.py
|
45b9ddf18e9281136135bf4a4a8062729e34c918
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
unwin/coi-services
|
ba80b8cc77189faf6b0faf04787b3567b6458f8f
|
43246f46a82e597345507afd7dfc7373cb346afa
|
refs/heads/master
| 2021-01-21T03:33:15.541500
| 2014-07-30T18:21:24
| 2014-07-30T18:21:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,950
|
py
|
#!/usr/bin/env python
"""Utilities for unit and integration testing"""
from pyon.core import bootstrap
from pyon.ion.resource import get_restype_lcsm
from pyon.public import BadRequest, PRED, RT
def create_dummy_resources(res_list, assoc_list=None, container=None):
"""
Creates all resources of res_list. The names of the resources will be the keys in the returned dict
The elements of the list are tuples with resource object as first element and optional actor name as second element.
Can also create associations linking resources by name.
Assoc_list is a list of 3-tuples subject name, predicate, object name.
"""
container = container or bootstrap.container_instance
rr = container.resource_registry
res_by_name = {}
for res_entry in res_list:
if isinstance(res_entry, dict):
res_obj = res_entry["res"]
actor_id = res_by_name[res_entry["act"]] if "act" in res_entry else None
elif type(res_entry) in (list, tuple):
res_obj = res_entry[0]
actor_id = res_by_name[res_entry[1]] if len(res_entry) > 1 else None
else:
raise BadRequest("Unknown resource entry format")
res_name = res_obj.name
res_obj.alt_ids.append("TEST:%s" % res_name)
res_lcstate = res_obj.lcstate
rid, _ = rr.create(res_obj, actor_id=actor_id)
res_by_name[res_name] = rid
lcsm = get_restype_lcsm(res_obj.type_)
if lcsm and res_lcstate != lcsm.initial_state:
rr.set_lifecycle_state(rid, res_lcstate)
if isinstance(res_entry, dict):
if "org" in res_entry:
rr.create_association(res_by_name[res_entry["org"]], PRED.hasResource, rid)
if assoc_list:
for assoc in assoc_list:
sname, p, oname = assoc
if type(sname) in (list, tuple) and type(oname) in (list, tuple):
for sname1 in sname:
for oname1 in oname:
s, o = res_by_name[sname1], res_by_name[oname1]
rr.create_association(s, p, o)
elif type(oname) in (list, tuple):
for oname1 in oname:
s, o = res_by_name[sname], res_by_name[oname1]
rr.create_association(s, p, o)
elif type(sname) in (list, tuple):
for sname1 in sname:
s, o = res_by_name[sname1], res_by_name[oname]
rr.create_association(s, p, o)
else:
s, o = res_by_name[sname], res_by_name[oname]
rr.create_association(s, p, o)
return res_by_name
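# Usage sketch (hedged; the IonObject/RT names below are assumptions, not taken from this file):
#   res_by_name = create_dummy_resources(
#       [(IonObject(RT.InstrumentDevice, name="inst1"),),
#        (IonObject(RT.InstrumentModel, name="model1"),)],
#       assoc_list=[("inst1", PRED.hasModel, "model1")])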
def create_dummy_events(event_list, container=None):
container = container or bootstrap.container_instance
ev_by_alias = {}
for (alias, event) in event_list:
evid, _ = container.event_repository.put_event(event)
ev_by_alias[alias] = evid
return ev_by_alias
|
[
"mmeisinger@ucsd.edu"
] |
mmeisinger@ucsd.edu
|
afec1d064271e059113884278dd49ac4092aeb86
|
f86a3aa610f22b30b13889b002d15301c3759a13
|
/rami/bin/rami/bin/easy_install
|
283dd259b20c13c2214a4c2f2cb0317f7d8c6277
|
[] |
no_license
|
whosawme/Loglist
|
17f8f4d0c1fcd8f3129545a28657d94eb604c36b
|
10e859a92d43be84d217498f2978105d473c665a
|
refs/heads/master
| 2020-04-02T08:09:44.107213
| 2018-11-02T04:57:19
| 2018-11-02T04:57:19
| 154,231,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
#!/Users/brocks/rami/rami/bin/rami/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ramibhj@gmail.com"
] |
ramibhj@gmail.com
|
|
921cbba76f26fcb27d90366d3081065543164299
|
d774c7187cba029012873160ba832ebe24c9b7d3
|
/api/view_basic.py
|
5d9c51a905d038c7f4a9b49853d269ae681d55a9
|
[] |
no_license
|
linyup/CrazyTester
|
cad8cd974314fe969db4a9cb6e9d4c3ee82a6509
|
ea12fa133aaa84e35bf726633d1f3e2e008c7d26
|
refs/heads/master
| 2023-08-24T11:30:36.194364
| 2021-11-02T07:20:14
| 2021-11-02T07:20:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,244
|
py
|
from django.http import JsonResponse, FileResponse
from django.views.decorators.http import require_http_methods
from django.forms.models import model_to_dict
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.conf import settings
from concurrent.futures import ThreadPoolExecutor, as_completed
from django.db import connection, close_old_connections
from api.extensions.custom_response import *
from queue import Queue
from api.models import *
import json
import time
import datetime
import os
import base64
# @require_http_methods(["GET"])
def test(req):
    if req.user.is_superuser:
        print("is superuser")
    else:
        print("not superuser")
def task_count():
tasks = TestTask.m.filter()
all_count = 0
for task in tasks:
count = len(str_to_list(task.case_ids))
print(task.id, count)
all_count += count
print(all_count)
return response_200()
# debug: send a notification message
def test2(req):
wwx = WorkWXApply(2)
# response_400(data={"a":1}, raise_flag=True)
if not wwx.msg:
        textcard = {
            "description": "API test platform home page:\n<a href=\"http://10.112.16.6/home\">http://10.112.16.6/home</a>",
            "title": "Test task - test report",
            "btntxt": "View details",
            "url": "http://10.112.16.6/home"
        }
wwx.send_msg("guojing02", textcard)
if wwx.msg:
return response_400(wwx.msg)
else:
return response_200()
else:
return response_400(wwx.msg)
# item = {
# "body": str(req.body),
# "GET": dict(req.GET),
# "POST": dict(req.POST),
# "user": req.POST.get("user", None)
# }
    # return response_200(item)  # unreachable: both branches above already return, and `item` is commented out
def get_now_time():
return datetime.datetime.now()
def json_dumps(data):
return json.dumps(data, ensure_ascii=False)
def json_dumps_indent4(data):
return json.dumps(data, ensure_ascii=False, indent=4)
def page_handel(req, raw_data, model_to_dict_handle=True):
item = {
"page": None,
"page_size": None,
"count": len(raw_data),
"data": [],
}
page = req.GET.get("page", "1")
page_size = req.GET.get("page_size", "10")
    # page_size == "-1" means return all data, unpaginated
if page_size == "-1":
data = raw_data
else:
        # if page is a string of digits and not "0", convert with int(); otherwise default to 1
page = int(page) if page.isdigit() and page != "0" else 1
page_size = int(page_size) if page_size.isdigit() and page_size != "0" else 10
data = raw_data[((page - 1) * page_size):((page - 1) * page_size) + page_size]
item["page"] = page
item["page_size"] = page_size
if model_to_dict_handle:
for i in data:
item["data"].append(model_to_dict(i))
else:
item["data"] = data
return item
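# Usage sketch (hypothetical queryset qs): for a request with ?page=2&page_size=10,
# page_handel(req, qs) returns {"page": 2, "page_size": 10, "count": len(qs),
# "data": [model_to_dict(i) for i in qs[10:20]]}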
def get_user_info_for_session(req, item=None, create=False):
"""
从用户信息
:param req:
:param create: 是否为创建
:param item: 如果传了item,会把用户信息user_info update到item中,
:return: user_info
"""
try:
user = str(req.session.get("user"))
user_id = req.session.get('_auth_user_id')
# print(req.session)
except Exception as e:
response_400_raise_exception("获取用户名,用户id时出错:{}".format(e))
else:
c_time = str(get_now_time())
user_info = {
"latest_update_user": user,
"latest_update_user_id": user_id,
"u_date": c_time,
}
if create:
user_info["create_user"] = user
user_info["create_user_id"] = user_id
user_info["c_date"] = c_time
if item:
item.update(user_info)
return user_info
@login_required
@require_http_methods(["POST"])
def excel_json_auto_switch(req):
sample_data_raw = req.POST.get("sample_data", "")
sample_data = None
if not sample_data_raw:
return response_400("样例数据为空!")
try:
sample_data_json = json.loads(sample_data_raw)
except:
sample_data = sample_data_wsitch_json(sample_data_raw)
else:
if type(sample_data_json) == str:
sample_data = sample_data_wsitch_json(sample_data_raw)
elif type(sample_data_json) == list:
sample_data = sample_data_wsitch_excel(sample_data_json)
else:
return response_400("样例数据格式有误,请检查修改!")
return response_200(sample_data=sample_data)
def sample_data_wsitch_excel(sample_data_json):
lines = []
if not sample_data_json:
return ""
try:
headers = sample_data_json[0].keys()
lines.append("\t".join(headers))
for i in sample_data_json:
item = []
for h in headers:
item.append(i[h])
lines.append(" ".join(item))
sample_data_str = "\n".join(lines)
return sample_data_str
except Exception as e:
return response_400("样例数据格式有误,请检查修改!")
def sample_data_wsitch_json(sample_data_raw):
sample_data_json = []
try:
sample_data_list = [i.split("\t") for i in sample_data_raw.strip(" ").split("\n") if i]
row = len(sample_data_list[0])
cos = len(sample_data_list)
if cos == 1:
response_400_raise_exception("测试样例数据有误1,请检查!(至少两行数据)")
for c in range(1, cos):
sample_data_item = {}
for r in range(row):
sample_data_item[sample_data_list[0][r]] = sample_data_list[c][r]
sample_data_json.append(sample_data_item)
sample_data_json = json_dumps_indent4(sample_data_json)
return sample_data_json
except Exception as e:
response_400_raise_exception("测试样例数据有误2,请检查!\r\n{}".format(e))
# split on a separator
def str_to_list(strs, flag=","):
    """
    :param strs: input string
    :param flag: separator
    :return: the string split on the separator, each element stripped, empty elements dropped
            "1,2 ,2 ,3, ," ==> ['1', '2', '2', '3']
    """
strs = strs or ""
return [i.strip() for i in strs.strip().split(flag) if i.strip()]
@require_http_methods(["POST"])
# params only: convert key/value pairs into JSON
def switch_json(req):
param_keys = req.POST.getlist("param_key", [])
param_values = req.POST.getlist("param_value", [])
try:
warning = []
params = {}
        # convert the key/value pairs into a dict
        for i in range(len(param_keys)):
            if param_keys[i].strip():
                params[param_keys[i]] = param_values[i]
            else:
                warning.append("Params with empty names were ignored!")
# print(params)
warning = ",".join(warning)
return response_200(data=params, warning=warning)
except Exception as e:
return response_400("出错:{}".format(e))
@require_http_methods(["POST"])
def switch_kv(req):
jp = req.POST.get("json_params", "")
try:
data = json.loads(jp)
return response_200(data=data)
except:
return response_400("json格式数据有错误或者为空!")
@require_http_methods(["POST"])
# convert data pasted from the browser's F12 devtools into JSON
def F12_p_to_json(req):
params = req.POST.get("params", "")
try:
dict_params = {i.split(": ")[0]: i.split(": ", 1)[-1] for i in params.split("\n") if i}
data = json_dumps_indent4(dict_params)
return response_200(data=data)
except Exception as e:
return response_400("出错:{}".format(e))
# convert kv-style params and headers from the form into a dict
def kv_switch_dict(k, v):
"""
k,v 皆为长度一样的列表,返回{k[i]:v[i]...}
"""
item = {}
warning = []
for i in range(len(k)):
if k[i].strip():
item[k[i]] = v[i]
else:
warning.append("名称为空的参数已被忽略!")
return {
"temp": item,
"warning": warning,
}
# validate not empty
def verify_not_is_None(item, fileds):
    """
    :param item: dict of data
    :param fileds: fields to validate
    :return: the name of the first empty field;
             None if all fields are non-empty
    """
for i in fileds:
if not item[i]:
return i
else:
return None
# validate json format
def verify_is_json_and_switch(item, fileds, switch=True):
    """
    :param item: dict of data
    :param fileds: fields of the dict to validate
    :return: None if the fields are valid JSON, otherwise an error message
    """
for i in fileds:
        # if the value is not empty
if item[i]:
try:
j = json.loads(item[i])
if type(j) == str or type(j) == int:
return "{}必须为json格式,当前为字符串或数字格式!".format(i)
if switch:
item[i] = j
except:
return "{}必须为json格式".format(i)
else:
if type(j) == str:
return "{}必须为json格式".format(i)
# convert dict fields to JSON strings
def dict_to_json(item, fileds):
for i in fileds:
if item[i]:
item[i] = json_dumps(item[i])
else:
item[i] = ""
@require_http_methods(["POST"])
# add a timestamp
def add_sign(req):
"""
获取cookies值,转换成字典格式,将字典中sign的值赋为当前时间戳
"""
try:
cookies = req.POST["cookies"]
if cookies:
dict_cookies = json.loads(cookies)
else:
dict_cookies = {}
timestamp = str(int(time.time() * 1000))
dict_cookies['sign'] = timestamp
data = json_dumps_indent4(dict_cookies)
return response_200(data=data)
except Exception as e:
return response_400("错误:{}".format(e))
# multithreading helper
class Futures:
def __init__(self, max_workers):
        self.executor = ThreadPoolExecutor(max_workers=max_workers)  # thread pool executor
        self.tasks = []  # list of submitted tasks
def submit(self, func, arg, *args, **kwargs):
task = self.executor.submit(func, arg, *args, **kwargs)
self.tasks.append(task)
return task
def as_completed(self):
"""
:return: 阻塞主进程,直到所有线程完成任务
"""
for future in as_completed(self.tasks):
# print("等待...{}".format(len(self.tasks)))
future.result()
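# Usage sketch (hypothetical task function `fetch`, not part of this module):
#   fu = Futures(max_workers=4)
#   for url in urls:
#       fu.submit(fetch, url)
#   fu.as_completed()  # blocks until every submitted task has finished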
@require_http_methods(["GET"])
def download(req, file_path):
file_path_real = os.path.join(settings.MEDIA_ROOT_VIRTUAL, file_path)
if not os.path.exists(file_path_real):
return response_404("文件不存在:{}!".format(file_path))
file = open(file_path_real, 'rb')
response = FileResponse(file)
response['Content-Type'] = 'application/octet-stream'
    # response['Content-Type'] = 'application/vnd.ms-excel'  # note the content type
response['Content-Disposition'] = 'attachment;filename="{}"'.format(file_path_real.rsplit("/")[-1])
return response
def customer_get_list(model, filter_item=None, order_by=None):
"""
:param model: 模型类对象
:param filter_item: 字典
:param order_by: 元祖,传入 () 时不排序
:return:
"""
filter_item = filter_item or {}
order_by = order_by or ("title", ) if order_by != -1 else ()
datas = []
try:
infos = model.m.filter(**filter_item).order_by(*order_by)
for i in infos:
datas.append(model_to_dict_custom(i))
except Exception as e:
return response_400("错误信息:{}".format(e))
return response_200(data=datas)
def model_to_dict_custom(model):
data = model_to_dict(model, exclude=["isDelete", "c_date", "u_date"])
data["c_date"] = model.c_date.strftime('%Y-%m-%d %H:%M:%S')
data["u_date"] = model.u_date.strftime('%Y-%m-%d %H:%M:%S')
return data
def api_ids_handle(item):
"""
:param item: 根据item中的project_id, group_ids,api_ids获取对应所有api_id
会在item中补充group_title_list,api_title_list字段
:return: 返回 msg(错误信息)和api_id_list
"""
api_id_list = None
def set_msg_and_return(msg=""):
if msg:
item["msg"] = msg
item["isValid"] = False
# item["group_title_list"] = json_dumps(['测试内容失效!:{}'.format(msg)])
# item["api_title_list"] = json_dumps(['测试内容失效!:{}'.format(msg)])
item["group_title_list"] = ""
item["api_title_list"] = ""
return msg, api_id_list
if not item["group_ids"] and not item["api_ids"]:
return set_msg_and_return("未填写分组id或接口id!")
    # validate project_id
if item["project_id"]:
try:
pro_data = ApiProject.m.get(id=item["project_id"])
except:
return set_msg_and_return("不存在的项目id:{}".format(item["project_id"]))
else:
item["project_title"] = pro_data.title
else:
return set_msg_and_return("未填写项目id!")
    # build group_id_list and group_title_list
group_id_list = []
group_title_list = []
if item["group_ids"] == "all":
group_list = ApiGroup.m.filter(project=item["project_id"])
for i in group_list:
group_id_list.append(i.id)
group_title_list.append(i.title)
else:
        # check that every group_id exists
group_id_list = str_to_list(item["group_ids"])
for group_id in group_id_list:
try:
group_data = ApiGroup.m.get(id=int(group_id))
except:
return set_msg_and_return("不存在的分组id:{}".format(group_id))
else:
group_title_list.append("分组:{} -- {}".format(group_data.id, group_data.title))
    # build api_id_list and api_title_list
api_id_list = str_to_list(item["api_ids"])
api_title_list = []
    # check that every api_id exists
for api_id in api_id_list:
try:
api_data = ApiApi.m.get(id=int(api_id))
except:
return set_msg_and_return("不存在的接口id:{}".format(api_id))
else:
api_title_list.append("接口:{} -- {}".format(api_data.id, api_data.title))
    # resolve all apis under each group into api_id_list
for group_id in group_id_list:
apis = ApiApi.m.filter(group=int(group_id))
for api in apis:
api_id_list.append(str(api.id))
    api_id_list = list(set(api_id_list))  # dedupe: apis from groups may overlap explicitly listed apis
if not api_id_list:
return set_msg_and_return("分组id与接口id 清洗后没有符合的数据!")
item["group_title_list"] = json_dumps(group_title_list) if group_title_list else ""
item["api_title_list"] = json_dumps(api_title_list) if api_title_list else ""
# return group_id_list, api_id_list
return set_msg_and_return()
def case_ids_handle(item):
def set_msg_and_return(msg=""):
if msg:
item["msg"] = msg
item["isValid"] = False
# item["case_title_list"] = json_dumps(['测试内容失效!:{}'.format(msg)])
item["case_title_list"] = ""
return msg, case_id_list
case_title_list = []
case_id_list = str_to_list(item["case_ids"])
for case_id in case_id_list:
try:
case_data = ApiCase.m.get(id=int(case_id))
case_title_list.append("用例:{} -- {}".format(case_data.id, case_data.title))
except:
return set_msg_and_return("不存在的用例id:{}".format(case_id))
if not case_title_list:
return set_msg_and_return("没有填写用例!")
item["case_title_list"] = json_dumps(case_title_list) if case_title_list else ""
return set_msg_and_return()
def get_all_projects(filter_item=None, **kwargs):
if filter_item is None:
filter_item = {}
filter_item.update(kwargs)
return ApiProject.m.filter(**filter_item).order_by("title")
# per-project counts of groups, apis and cases (bar chart)
def staticitem_project(req):
# data = []
project_title_list = []
group_count_list = []
api_count_list = []
case_count_list = []
cursor = connection.cursor()
case_sql = "SELECT api_id, count(*) FROM api_case WHERE isDelete=false group by api_id;"
cursor.execute(case_sql)
cases = cursor.fetchall()
projects = get_all_projects()
for project in projects:
project_title_list.append(project.title)
groups = ApiGroup.m.filter(project_id=project.id)
group_count = len(groups)
api_count = 0
case_count = 0
        for group in groups:
            apis = ApiApi.m.filter(group_id=group.id)
api_count += len(apis)
for api in apis:
for case in cases:
if case[0] == api.id:
case_count += case[1]
group_count_list.append(group_count)
api_count_list.append(api_count)
case_count_list.append(case_count)
# data.append({project.title: [group_count, api_count, case_count]})
# print("{}: 分组:{}个,接口{}个,用例{}个\n".format(
# project.title, group_count, api_count, case_count))
# print("总计{}个用例".format(len(ApiCase.m.filter())))
return response_200(
project_title_list=project_title_list,
group_count_list=group_count_list,
api_count_list=api_count_list,
case_count_list=case_count_list,
all_count=len(ApiCase.m.filter()))
# per-project task counts, and total case count across all tasks
def staticitem_task(req):
data = []
projects = get_all_projects()
for project in projects:
task_count = TestTask.m.filter(project_id=project.id).count()
data.append({"name": project.title, "value": task_count})
tasks = TestTask.m.filter()
case_count_for_task = 0
for task in tasks:
count = len(str_to_list(task.case_ids))
case_count_for_task += count
return response_200(data=data, task_count=TestTask.m.filter().count(),
case_count_for_task=case_count_for_task)
# per-project recent success/failure counts over the last seven days
def staticitem_recent(req):
    # success/failure counts for the last seven days
project_id = req.GET.get("project_id", "")
filter_item = {}
if project_id:
filter_item["id"] = project_id
    days = []  # last seven days as datetime objects; str() gives e.g. 2021-07-02
    today = datetime.date.today()  # today's date
for i in range(7):
day = today - datetime.timedelta(days=i)
days.insert(0, day)
colors = ["#5470c6", "#91cc75", "#fac858", "#ee6666",
"#73c0de", "#3ba272", "#fc8452", "#9a60b4",
"#ea7ccc", "#f173ac", "#f05b72",
"#fdb933", "#f26522", "#ef5b9c"]
colors_q = Queue()
for color in colors:
colors_q.put(color)
titles = []
series = []
cursor = connection.cursor()
projects = get_all_projects(filter_item)
username = str(req.session.get("user"))
for project in projects:
if username not in str_to_list(project.users or ""):
continue
if colors_q.empty():
for color in colors:
colors_q.put(color)
title_succeed = "{} - 成功数量".format(project.title)
titles.append(title_succeed)
title_fail = "{} - 失败数量".format(project.title)
titles.append(title_fail)
temp_succeed = {
"name": title_succeed,
"type": 'bar',
"stack": project.title,
"emphasis": {
"focus": 'series'
},
"data": [],
"itemStyle": {
"normal": {
"label": {
"show": True,
"position": 'middle',
"textStyle": {
"color": 'black',
"fontSize": 24 if project_id else 12
}
},
"color": "#73c0de" if project_id else colors_q.get()
}
}
}
temp_fail = {
"name": title_fail,
"type": 'bar',
"stack": project.title,
"emphasis": {
"focus": 'series'
},
"data": [],
"itemStyle": {
"normal": {
"label": {
"show": True,
"position": 'top',
"textStyle": {
"color": 'red',
"fontSize": 24 if project_id else 14
}
},
"color": "#ee6666" if project_id else colors_q.get()
}
}
}
for d in days:
cursor.execute(
"select count(*) from api_test_report where (c_date between '{}' and '{}') and project_title = '{}' and test_ret = 1".format(
str(d), str(d + datetime.timedelta(days=1)), project.title
))
succed_count = str(cursor.fetchone()[0])
temp_succeed["data"].append(succed_count)
cursor.execute(
"select count(*) from api_test_report where (c_date between '{}' and '{}') and project_title = '{}' and test_ret = 0".format(
str(d), str(d + datetime.timedelta(days=1)), project.title
))
fail_count = cursor.fetchone()[0]
temp_fail["data"].append(str(fail_count))
series.append(temp_succeed)
series.append(temp_fail)
return response_200(series=series, titles=titles, days=days)
# per-user totals of created cases, and additions over the last 7 weeks
def staticitem_user(req):
days_raw = [] # ["2021-07-01", "2021-07-07", ...]
days = [] # ["2021-07-01 -- 2021-07-07", "2021-07-01 -- 2021-07-07", ...]
    # all members of the test group: prefer the custom list; fall back to all django users
try:
users = str_to_list(ApiUser.m.get(type_id="1").users or "")
except:
users = []
users_raw = User.objects.filter()
for user in users_raw:
users.append(user.username)
series = []
    today = datetime.date.today()  # today's date
    # first and last day of the current week
this_week_start = today - datetime.timedelta(days=today.weekday())
# this_week_end = today + datetime.timedelta(days=6 - today.weekday())
days_raw.append([this_week_start, today])
for i in range(1, 7):
        # first and last day of the i-th previous week
last_week_start = today - datetime.timedelta(days=today.weekday() + 7 * i)
last_week_end = today - datetime.timedelta(days=today.weekday() + 1 + 7 * (i-1))
days_raw.append([last_week_start, last_week_end])
# print(days)
for user in users:
temp = {
"name": user,
"type": 'line',
"data": []
}
series.append(temp)
cursor = connection.cursor()
for day in days_raw:
days.append("{} - {}".format(str(day[0]).replace("-", "/"), str(day[1]).replace("-", "/")))
sql = "SELECT create_user, count(*) FROM api_case WHERE (c_date between '{}' and '{}') and isDelete=false GROUP BY create_user;".format(
str(day[0]), str(day[1] + datetime.timedelta(days=1)))
# print(sql)
cursor.execute(sql)
rets = cursor.fetchall()
for serie in series:
for ret in rets:
if ret[0] == serie["name"]:
serie["data"].append(ret[1])
break
else:
serie["data"].append(0)
return response_200(series=series, users=users, days=days)
# count of cases created by each user
def staticitem_user2(req):
series = []
cursor = connection.cursor()
sql = "SELECT create_user_id, count(*) FROM api_case WHERE isDelete=false group by create_user_id;"
cursor.execute(sql)
for line in cursor.fetchall():
series.append({"name": User.objects.get(id=line[0]).username or "无", "value": line[1]})
return response_200(series=series)
# def get_host_ip():
# try:
# s = socket.socket()
# s.connect(("www.baidu.com", 80))
# ip = s.getsockname()[0]
# finally:
# s.close()
# return ip
def strToBase64(s):
    '''
    Convert a string to a base64 string
    :param s:
    :return:
    '''
strEncode = base64.b64encode(s.encode('utf8'))
return str(strEncode, encoding='utf8')
def base64ToStr(s):
    '''
    Convert a base64 string back to a plain string
    :param s:
    :return:
    '''
strDecode = base64.b64decode(bytes(s, encoding="utf8"))
return str(strDecode, encoding='utf8')
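# Round-trip sketch: strToBase64('abc') == 'YWJj' and base64ToStr('YWJj') == 'abc'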
|
[
"547684776@qq.com"
] |
547684776@qq.com
|
7b45039e57dfbcd87822b96699a38579a3b9b079
|
c4e5cb5d744c5c0fdf031121073c34016f6a9721
|
/insureAnts/comView/views.py
|
a4d6f73dfad534fd6f811707f01de5689e13e361
|
[] |
no_license
|
nfgrawker/Django_project
|
0d7e819db025e732596a2034d146e2f668931948
|
fee58cc419e084b7c4769184b03663b9852db1b4
|
refs/heads/master
| 2021-06-15T23:55:39.064692
| 2021-02-21T16:35:52
| 2021-02-21T16:35:52
| 155,213,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Post
def home(request):
return render(request, "comView/login.html")
def about(request):
return render(request, "comView/about.html")
def index(request):
return render(request, "comView/index.html")
def tables(request):
context = { 'posts' : Post.objects.all()}
return render(request, "comView/tables.html", context)
def charts(request):
return render(request, "comView/charts.html")
def forgot(request):
return render(request, "comView/forgot-password.html")
|
[
"nfgrawker@gmail.com"
] |
nfgrawker@gmail.com
|
ef8f564e90f6902bd7e910905b8b070b870a167f
|
9a6bbda7b44ef12895058eac593fd13964209acd
|
/main.py
|
f9c9d13dfba51be58128bbbed5164838bd4a6f2e
|
[] |
no_license
|
m358807551/game_tree
|
b30369ee3c72e7571b79da4aad7a124eca7822ad
|
fb45614263e8ce1bd05f9a0ef744e90eac38b5f4
|
refs/heads/master
| 2021-01-20T08:07:45.590414
| 2017-05-03T03:17:14
| 2017-05-03T03:17:14
| 90,103,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
# coding=utf-8
from core import GameTree
from game import HorseRace
from game import ChampionSolo
def main1():
game_tree = GameTree(6, HorseRace)
print game_tree.best_strategy(), game_tree.root.weight
def main2():
game_tree = GameTree(10, ChampionSolo)
print game_tree.best_strategy(), game_tree.root.weight
if __name__ == "__main__":
main2()
|
[
"mayupeng@wanwujieshu.net"
] |
mayupeng@wanwujieshu.net
|
bcee10fcaec210f62e55fa54bd72e2abe27cc9b7
|
754188242cd651e6398391f0917e0f3212176874
|
/icheckgames/templatetags/isUserGame.py
|
8cab96d4ec0fe269bfe5af8a47aa75ec81628da4
|
[] |
no_license
|
abhidevmumbai/checkit
|
7d5fee6f96b891f13dc4bd9b4266ef0328a351d5
|
93677d6556f47285537bbc3fcc93ac4af30cd211
|
refs/heads/master
| 2021-01-13T14:38:03.483501
| 2013-07-10T12:10:16
| 2013-07-10T12:10:16
| 9,027,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
from django import template
from icheckgames.models import GameMap
register = template.Library()
'''
Template tag function to check if the game exists in the
user's list
'''
@register.simple_tag
def isUserGame(user, obj):
try:
user.gamemap_set.get(game=obj)
return 'checked'
except:
        return ''  # return an empty string so the template does not render "None"
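# Template usage sketch (assumed context variables `user` and `game`):
#   {% load isUserGame %}
#   <input type="checkbox" {% isUserGame user game %} />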
|
[
"abhishekd@snapstick.com"
] |
abhishekd@snapstick.com
|
8b33329ab9ca98f83c03387c11ff53907a43e3bd
|
6fa6ad60ef3150176b840de908088ccc34221c16
|
/venv/Scripts/pip3.6-script.py
|
f72b1df01b56efdf6e110f807e3b049900fcde9d
|
[] |
no_license
|
ZeddZoo/Jingle
|
2212c78364b9931830047ae949d8ea8a31a6fdf1
|
40aff13614e558cbe5e8ff9405fb30c53a94e6ca
|
refs/heads/master
| 2020-04-07T14:19:04.654954
| 2018-12-05T19:52:14
| 2018-12-05T19:52:14
| 158,442,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
#!C:\Users\Ziyi\Desktop\TermProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
|
[
"44912877+ZeddZoo@users.noreply.github.com"
] |
44912877+ZeddZoo@users.noreply.github.com
|
05b77809f0e545d0f545d863da90e44de51c2c7d
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_077/ch4_2020_03_20_12_34_54_305180.py
|
a7402ae72f0e4cfe2f3c9cb1eb90a66f7458df8c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
def classifica_idade(x):
    # fixed: the original had invalid syntax ("x=>12") and returned undefined names
    if x <= 11:
        return 'crianca'
    elif 12 <= x <= 17:
        return 'adolescente'
    else:  # x >= 18
        return 'adulto'
|
[
"you@example.com"
] |
you@example.com
|
7e1b3ac3647139d0f383dedf5b9d049c36240904
|
6c782576c19f17ece09548004027991faeddd5a4
|
/apps/order/models.py
|
e05fe1220b66320d4b25adfb6ee81215187135d5
|
[] |
no_license
|
zl6892191/daydayflash
|
f7db20d0d5153e425e03f5c1f3e44c27e4ce170a
|
a921a0dd1d12e1586ff4ffdc5b485fb830b8980a
|
refs/heads/master
| 2020-03-13T13:56:14.660412
| 2018-04-26T11:53:03
| 2018-04-26T11:53:03
| 131,148,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,293
|
py
|
from django.db import models
from db.base_model import BaseModel
# Create your models here.
class OrderInfo(BaseModel):
    '''Order info model class'''
    PAY_METHODS = {
        '1': "cash on delivery",
        '2': "WeChat Pay",
        '3': "Alipay",
        '4': 'UnionPay'
    }
    PAY_METHOD_CHOICES = (
        (1, 'cash on delivery'),
        (2, 'WeChat Pay'),
        (3, 'Alipay'),
        (4, 'UnionPay')
    )
    ORDER_STATUS = {
        1: 'pending payment',
        2: 'pending shipment',
        3: 'pending delivery',
        4: 'pending review',
        5: 'completed'
    }
    ORDER_STATUS_CHOICES = (
        (1, 'pending payment'),
        (2, 'pending shipment'),
        (3, 'pending delivery'),
        (4, 'pending review'),
        (5, 'completed')
    )
    order_id = models.CharField(max_length=128, primary_key=True, verbose_name='order id')
    user = models.ForeignKey('user.User', verbose_name='user')
    addr = models.ForeignKey('user.Address', verbose_name='address')
    pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=3, verbose_name='payment method')
    total_count = models.IntegerField(default=1, verbose_name='item count')
    total_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='goods total price')
    transit_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='shipping fee')
    order_status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name='order status')
    trade_no = models.CharField(max_length=128, default='', verbose_name='payment transaction id')
    class Meta:
        db_table = 'df_order_info'
        verbose_name = 'order'
        verbose_name_plural = verbose_name
class OrderGoods(BaseModel):
    '''Order goods model class'''
    order = models.ForeignKey('OrderInfo', verbose_name='order')
    sku = models.ForeignKey('goods.GoodsSKU', verbose_name='goods SKU')
    count = models.IntegerField(default=1, verbose_name='item count')
    price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='item price')
    comment = models.CharField(max_length=256, default='', verbose_name='comment')
    class Meta:
        db_table = 'df_order_goods'
        verbose_name = 'order goods'
        verbose_name_plural = verbose_name
|
[
"258306908@qq.com"
] |
258306908@qq.com
|
96b1b935b4050b4196ba8415dbd5dc3187a6da93
|
01c0c44238b060aabbb92c51d77d4f8b9db78b20
|
/core_application/urls.py
|
564eb03222670908f32ae30ee665f9b5eedf7688
|
[] |
no_license
|
Manimaran11/Video_Upload
|
5312b9cc04d81c07c39355202a5328a8ac1586d4
|
313a3064b25aca296adfa46718b1cb21109c6121
|
refs/heads/master
| 2020-07-31T12:45:50.405805
| 2019-09-30T13:15:58
| 2019-09-30T13:15:58
| 210,607,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls import include, url # For django versions before 2.0
from django.urls import include, path # For django versions from 2.0 and up
from video_application.views import HomeView, NewVideo, CommentView, LoginView, RegisterView, VideoView, VideoFileView,LogoutView
urlpatterns = [
path('admin/', admin.site.urls,name='admin'),
path('', include('video_application.urls')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
# For django versions before 2.0:
# url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
[
"pmanimaran097@gmail.com"
] |
pmanimaran097@gmail.com
|
bf296306ff4e50b59c299c2b4abdedafb41ee1ac
|
df46cf831cba3e2b91e794be6e03f0cbd1f2f8af
|
/p53.py
|
19085be8d40163444a5cba5fc64cd3578d6ab333
|
[] |
no_license
|
manjupinky/Phyton
|
7653022f8a9b64f7213902c34998e7900f529b5e
|
711bb5f491a904c1e35b4204223a90d8f3c16a6e
|
refs/heads/master
| 2020-03-27T10:10:07.963601
| 2019-01-22T07:58:54
| 2019-01-22T07:58:54
| 146,399,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
m=int(input("Enter a number:"))
tot=0
while(m>0):
dig=m%10
tot=tot+dig
m=m//10
print("The total sum of digits is:",tot)
|
[
"noreply@github.com"
] |
manjupinky.noreply@github.com
|
b78d1274026ad79ba65fd9251177d41ebcc34b16
|
a17b602c8c2ce0405622c57dd47218b17e57718d
|
/test_class/language_survey.py
|
4216813046b47700e3f50d81111a63b623419d6a
|
[] |
no_license
|
rowlingz/Deepen-learning-Python
|
a5f2fe75b6633c6786912d23d414d303f1d4ade8
|
9b55b49b468ff412c5d78773a03279bdc77a3874
|
refs/heads/master
| 2020-04-29T15:22:30.453074
| 2019-06-06T06:52:49
| 2019-06-06T06:52:49
| 176,226,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
# -*- coding:utf-8 -*-
from survey import AnonymousSurvey
question = "What language did you first learn to speak?"
my_survey = AnonymousSurvey(question)
my_survey.show_question()
print("Enter 'q' at any time to quit.")
while True:
response = input("Language: ")
if response == 'q':
break
my_survey.store_response(response)
print("\nThank you to everyone who participated in the survey!")
my_survey.show_results()
|
[
"ning_ML@126.com"
] |
ning_ML@126.com
|
e56c8974cdd56cc2d05bc44a6990af0d31922a37
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/safer.py
|
3bf006cbfcdbaf4af9ac24ab7cd1eaee344e77e6
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 717
|
py
|
ii = [('GodwWSL2.py', 1), ('ChanWS.py', 1), ('WilbRLW4.py', 2), ('WilbRLW5.py', 1), ('LeakWTI3.py', 3), ('PeckJNG.py', 1), ('AdamWEP.py', 1), ('WilbRLW2.py', 1), ('ClarGE2.py', 2), ('CarlTFR.py', 3), ('LyttELD.py', 1), ('CookGHP2.py', 1), ('BailJD1.py', 1), ('MarrFDI2.py', 1), ('ClarGE.py', 1), ('LandWPA.py', 1), ('GilmCRS.py', 1), ('NewmJLP.py', 1), ('GodwWLN.py', 1), ('CoopJBT.py', 2), ('SoutRD2.py', 1), ('SoutRD.py', 2), ('DickCSG.py', 1), ('WheeJPT.py', 1), ('MartHRW.py', 1), ('WestJIT.py', 1), ('CoolWHM3.py', 1), ('FitzRNS.py', 1), ('StorJCC.py', 1), ('WilbRLW3.py', 1), ('AinsWRR2.py', 1), ('MartHRW2.py', 1), ('FitzRNS2.py', 5), ('MartHSI.py', 1), ('EvarJSP.py', 3), ('WordWYR.py', 2), ('KeigTSS.py', 1)]
|
[
"prabhjyotsingh95@gmail.com"
] |
prabhjyotsingh95@gmail.com
|
f7377f4dcd188112ba3335f05acbd16f1b028e75
|
e0d9844e123fa0706388814b9f29758258589487
|
/torch/fx/__init__.py
|
299a3d856477f92494579227c14eea4be406decf
|
[] |
no_license
|
pigpigman8686/seg
|
b5cf5261a5744e89ed5e5b145f60b0ccc3ba2c0c
|
61c3816f7ba76243a872fe5c5fc0dede17026987
|
refs/heads/master
| 2023-04-10T22:22:35.035542
| 2021-04-22T06:24:36
| 2021-04-22T06:24:36
| 360,398,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,905
|
py
|
# type: ignore
r'''
**This feature is under a Beta release and its API may change.**
FX is a toolkit for developers to use to transform ``nn.Module``
instances. FX consists of three main components: a **symbolic tracer,**
an **intermediate representation**, and **Python code generation**. A
demonstration of these components in action:
::
import torch
# Simple module for demonstration
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
module = MyModule()
from torch.fx import symbolic_trace
# Symbolic tracing frontend - captures the semantics of the module
symbolic_traced : torch.fx.GraphModule = symbolic_trace(module)
# High-level intermediate representation (IR) - Graph representation
print(symbolic_traced.graph)
"""
graph(x):
%param : [#users=1] = self.param
%add_1 : [#users=1] = call_function[target=<built-in function add>](args = (%x, %param), kwargs = {})
%linear_1 : [#users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
%clamp_1 : [#users=1] = call_method[target=clamp](args = (%linear_1,), kwargs = {min: 0.0, max: 1.0})
return clamp_1
"""
# Code generation - valid Python code
print(symbolic_traced.code)
"""
def forward(self, x):
param = self.param
add_1 = x + param; x = param = None
linear_1 = self.linear(add_1); add_1 = None
clamp_1 = linear_1.clamp(min = 0.0, max = 1.0); linear_1 = None
return clamp_1
"""
The **symbolic tracer** performs “symbolic execution” of the Python
code. It feeds fake values, called Proxies, through the code. Operations
on these Proxies are recorded. More information about symbolic tracing
can be found in the :func:`symbolic_trace` and :class:`Tracer`
documentation.
The **intermediate representation** is the container for the operations
that were recorded during symbolic tracing. It consists of a list of
Nodes that represent function inputs, callsites (to functions, methods,
or :class:`torch.nn.Module` instances), and return values. More information
about the IR can be found in the documentation for :class:`Graph`. The
IR is the format on which transformations are applied.
**Python code generation** is what makes FX a Python-to-Python (or
Module-to-Module) transformation toolkit. For each Graph IR, we can
create valid Python code matching the Graph’s semantics. This
functionality is wrapped up in :class:`GraphModule`, which is a
:class:`torch.nn.Module` instance that holds a :class:`Graph` as well as a
``forward`` method generated from the Graph.
Taken together, this pipeline of components (symbolic tracing →
intermediate representation → transforms → Python code generation)
constitutes the Python-to-Python transformation pipeline of FX. In
addition, these components can be used separately. For example,
symbolic tracing can be used in isolation to capture a form of
the code for analysis (and not transformation) purposes. Code
generation can be used for programmatically generating models, for
example from a config file. There are many uses for FX!
Several example transformations can be found at the
`examples <https://github.com/pytorch/examples/tree/master/fx>`__
repository.
'''
from .graph_module import GraphModule
from .symbolic_trace import symbolic_trace, Tracer, wrap
from .graph import Graph
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
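# A minimal transform sketch (editorial addition, commented out so the module is unchanged):
# it rewrites every torch.add call in a traced module into torch.mul, illustrating the
# trace -> edit Graph -> regenerate code pipeline described in the docstring above.
#
#   import torch
#   from torch.fx import symbolic_trace
#
#   class AddModule(torch.nn.Module):
#       def forward(self, x):
#           return torch.add(x, x)
#
#   traced = symbolic_trace(AddModule())
#   for node in traced.graph.nodes:
#       if node.op == 'call_function' and node.target == torch.add:
#           node.target = torch.mul  # swap the op in the IR
#   traced.recompile()  # regenerate forward() from the edited Graph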
|
[
"952361195@qq.com"
] |
952361195@qq.com
|
9989efaeb2829d5148283f702b03e74ab10e785c
|
106bd4733eaf07a2cce1142bf04f729a3374e311
|
/part03/numpy/code/example_4.py
|
884ab825fcd1b0e1651940d3e98b7da3dddf03e0
|
[] |
no_license
|
askmr03/nd-AIPython
|
d2d06e7b07dc2b6683473646ceb465e67b6fc399
|
a0c2979f66013c93eefe799dd75d0dd9400ec942
|
refs/heads/master
| 2021-10-09T08:44:02.525056
| 2018-12-24T15:42:35
| 2018-12-24T15:42:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# We create a 3 x 4 ndarray full of zeros.
import numpy as np
X = np.zeros((3,4))
# We print X
print()
print('X = \n', X)
print()
# We print information about X
print('X has dimensions:', X.shape)
print('X is an object of type:', type(X))
print('The elements in X are of type:', X.dtype)
'''
X =
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]
X has dimensions: (3, 4)
X is an object of type: <class 'numpy.ndarray'>
The elements in X are of type: float64
'''
|
[
"1193094618@qq.com"
] |
1193094618@qq.com
|
a79302a9ae686eca28c7323d67f92a5c18171bb8
|
ffb8e614efd3837b71962c379a0b1b7924aa941d
|
/mot_imgnet/mot_util.py
|
f72436dbc783c62e61b1e973ce8049d14bb52c72
|
[] |
no_license
|
UniLauX/mot_scripts
|
877fb22c532016821d6d68d3c2ca7058e6a22ba8
|
4afcdfee4c0630e6d903ae43cdd8345e0ed6d524
|
refs/heads/master
| 2021-06-17T21:05:37.903723
| 2021-01-21T10:40:43
| 2021-01-21T10:40:43
| 133,614,351
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,463
|
py
|
import _init_paths
from common_lib import create_dir
from common_lib import load_txt_to_strarr
from common_lib import save_strarr_to_txt
from trans_util import rename_copy_images
from trans_util import rename_create_ppmimgs
import numpy as np
import os
debug_flag=True
def load_seq_names(seqmap_path):
str_arr=load_txt_to_strarr(seqmap_path)
seq_names=str_arr[1:]
return seq_names
def generate_jpgimgs(seq_data_dir,seq_names,dest_jpgimgs_dir):
for seq_name in seq_names:
seq_folder_dir=os.path.join(seq_data_dir,seq_name)
seq_img1_dir=os.path.join(seq_folder_dir,'img1')
rename_copy_images(seq_img1_dir,dest_jpgimgs_dir,seq_name)
def generate_ppmimgs(seq_data_dir,seq_names,dest_ppmimgs_dir):
if not os.path.exists(dest_ppmimgs_dir):
create_dir(dest_ppmimgs_dir)
for seq_name in seq_names:
seq_folder_dir=os.path.join(seq_data_dir,seq_name)
seq_img1_dir=os.path.join(seq_folder_dir,'img1')
rename_create_ppmimgs(seq_img1_dir,dest_ppmimgs_dir,seq_name)
def generate_imgsets(seqs_data_dir,seq_names,dest_imgsets_path):
##im_names(.jpg)
str_arr=[]
for seq_name in seq_names:
seq_data_dir=os.path.join(seqs_data_dir,seq_name)
seq_img1_dir=os.path.join(seq_data_dir,'img1')
im_names=os.listdir(seq_img1_dir)
im_names=np.sort(im_names)
for im_name in im_names:
base_im_name=os.path.splitext(im_name)[0]
dest_im_name=seq_name+'_'+base_im_name
str_arr.append(dest_im_name)
dest_im_names=np.array(str_arr)
##save
save_strarr_to_txt(dest_imgsets_path,dest_im_names)
##get annotations when necessary
def generate_annots():
print 'generate_annots....'
    # if year == 2015:
    #     option one
    # else:
    #     option two
def generate_bmfsets(seq_data_dir,seq_names,dest_imgsets_dir):
dest_ppmsets_dir=os.path.join(dest_imgsets_dir,'Bmf')
if debug_flag:
print dest_ppmsets_dir
##im_names(.ppm)
for seq_name in seq_names:
seq_folder_dir=os.path.join(seq_data_dir,seq_name)
seq_img1_dir=os.path.join(seq_folder_dir,'img1')
im_names=os.listdir(seq_img1_dir)
im_names=np.sort(im_names)
## file_path
dest_bmffile_path=os.path.join(dest_ppmsets_dir,seq_name+'.bmf')
## im_names
str_arr=[]
first_line=str(len(im_names))+" "+'1'
str_arr.append(first_line)
for im_name in im_names:
tmp_im_name=seq_name+'_'+im_name
dest_im_name=tmp_im_name.replace('.jpg','.ppm')
str_arr.append(dest_im_name)
dest_im_names=np.array(str_arr)
##save
        ## add total_frame_num and view_count(1) at the very beginning
save_strarr_to_txt(dest_bmffile_path,dest_im_names)
## need to make the data format more concise
## write det_arr in MOT-like format (with split_sign)
def write_detarr_as_mot(file_path,det_float_arr,split_sign=','):
print '============================write detarr as mot===================================================='
det_str_arr=det_float_arr.astype(str)
col_num=det_str_arr.shape[1]
with open(file_path, 'w') as f:
for str_row in det_str_arr:
tmp_row=str_row[0]
for col_id in xrange(1,col_num):
tmp_row=tmp_row+split_sign+str_row[col_id]
f.write('{:s}\n'. format(tmp_row))
f.close()
|
[
"UniLau13@gmail.com"
] |
UniLau13@gmail.com
|
7898351cdf2db5e3121b0d53f319ab781f7dc22d
|
1eb837cd5057f729631bf6ff3cf4c54cee8904d5
|
/course/migrations/0001_initial.py
|
3e8c0e9a0bc569dcf0b1efe168a42b70db0e8e50
|
[] |
no_license
|
andaljulistiawan/pi
|
214781b777bfcac862773e800ec0657ee0db231b
|
c8b89b2310fc6e6fd2540b8dc1fcfdf0d3fea188
|
refs/heads/master
| 2020-05-30T07:15:37.044228
| 2016-09-26T12:22:25
| 2016-09-26T12:22:25
| 69,248,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-15 11:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
]
|
[
"andalwebsite@gmail.com"
] |
andalwebsite@gmail.com
|
d8daf90c06bca790d512d11543a0cd73c5952f96
|
7d768b5be4213c3ac90648d48d1a322fb8c5c433
|
/python_code/chuanzhi/python_advance/19/python进程之间通讯.py
|
523cbae8a97e0e9a695fd5c84ecd6168a1d68bde
|
[] |
no_license
|
googleliyang/gitbook_cz_python
|
7da5070b09e760d5e099aeae468c08e705b7da78
|
c82b7d435dc11016e24cde2bdc4a558f507cb668
|
refs/heads/master
| 2020-04-02T17:47:58.400424
| 2018-12-22T09:48:59
| 2018-12-22T09:48:59
| 154,672,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : python进程之间通讯.py
# @Author: ly
# @Date : 2018/12/8
import multiprocessing
import time
def get_data(q):
"""子进程入口"""
while True:
        if q.empty():
            print('Queue is empty, will check again later')
            time.sleep(5)
        else:
            print('Got data: %s' % q.get())
            time.sleep(2)
if __name__ == '__main__':
q = multiprocessing.Queue(1000)
c_p = multiprocessing.Process(target=get_data, args=(q,))
c_p.start()
while True:
val = input("请输入")
if q.full():
print('客官已经满了,请稍后再来')
time.sleep(1)
else:
q.put(val)
|
[
"yang.li.gogle@gmail.com"
] |
yang.li.gogle@gmail.com
|
0834469882e2c001722a709bdbeb7a79016dafe3
|
a4538c928e32c698aeca27ef0b87e9bca2917eaf
|
/Hack_A_Roo/Main.py
|
26ad27ef6512fedadde192f420704cefe6520a9e
|
[] |
no_license
|
Tvkqd/Hackathon-F19-IBM2-ConfigPrediction
|
90be47c77e4f1dbc84f8e29d4f8a491d190ed6db
|
a45e1d2b0464e96974f018a6374cd51e9738dd89
|
refs/heads/master
| 2020-09-06T11:09:47.121512
| 2019-11-11T01:15:39
| 2019-11-11T01:15:39
| 220,408,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,504
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import os
from dash.dependencies import Input, Output
import Read_input as grph
import plotly.graph_objs as go
import pandas as pd
import csv
stylesheets = ['style.css']
py_read_input = __import__('Read_input')
mainP = dash.Dash(__name__, external_stylesheets=stylesheets)
mainP.layout = html.Div(children=[
html.H1(children="I DONT KNOW WHAT IS THIS CALL"),
html.Div(children='''
Group Meh...
'''),
dcc.Input(id = 'input', value = '', type = 'text'),
##html.Div(grph.res(grph.read_input_file_Expansion(), grph.read_input_file_Update())),
html.Div(dcc.Input(id='input-box', type='text')),
html.Button('Submit', id='button'),
html.Div(id='container-button-basic',
children='Enter a value and press submit'),
html.Div([html.H1("Customer Requests for Provisioning", style={'textAlign': 'center'}),
dcc.Dropdown(id='my-dropdown',options=[{'label': 'Product 1', 'value': 'Product 1'},{'label': 'Product 2', 'value': 'Product 2'},
{'label': 'Product 3', 'value': 'Product 3'}],
multi=True,value=['Product 1'],style={"display": "block","margin-left": "auto","margin-right": "auto","width": "60%"}),
dcc.Graph(id='my-graph')
], className="container")
])
@mainP.callback(Output('my-graph', 'figure'),
[Input('my-dropdown', 'value')])
def update_graph(selected_dropdown_value):
dropdown = {"Product 1": "Product 1","Product 2": "Product 2","Product 3": "Product 3",}
trace1 = []
trace2 = []
trace3 = []
df = grph.read_input_file_Update()
#df['Job Creation Time'] = pd.to_datetime(df['Job Creation Time'], infer_datetime_format=True).dt.date
df['Job Creation Time'].groupby(df['Job Creation Time'].dt.date).count()
print(len(df))
for product in selected_dropdown_value:
#print(df[df["Product Name"] == 'SUCCESS']["Job Result"])
#print(df[df["Product Name"] == product])
trace1 = [dict(
x=df[df["Product Name"] == product]["Job Creation Time"],
y=df[df["Job Result"] == 'SUCCESS']["Job Result"],
autobinx=True,
autobiny=True,
marker=dict(color='rgb(255, 192, 203)'),
name='Success',
type='histogram',
xbins=dict(
# end =df[df["Product Name"] == product]["Job Completion Time"] ,
size='M1',
# start = df[df["Product Name"] == 'SUCCESS']["Job Creation Time"],
)
)]
trace2 = [dict(
x=df[df["Product Name"] == product]["Job Creation Time"],
y=df[df["Job Result"] == 'FAILURE']["Job Result"],
autobinx=True,
autobiny=True,
marker=dict(color='rgb(0, 0, 0)'),
name='Fail',
type='histogram',
xbins=dict(
# end = '',
size='M1',
# start = ''
)
)]
trace3 = [dict(
x=df[df["Product Name"] == product]["Job Creation Time"],
y=df[df["Job Result"] == 'CANCELLED']["Job Result"],
autobinx=True,
autobiny=True,
marker=dict(color='rgb(152, 251, 152)'),
name='Cancel',
type='histogram',
xbins=dict(
# end = '',
size='M1',
# start = ''
)
)]
traces = [trace1, trace2, trace3]
data = [val for sublist in traces for val in sublist]
figure = {'data': data,
'layout': go.Layout(colorway=["#5E0DAC", '#FF4F00', '#375CB1', '#FF7400', '#FFF400', '#FF0056'],
height=600,title=f"{', '.join(str(dropdown[i]) for i in selected_dropdown_value)} Over Time",
xaxis={"title":"Date",
'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', 'step': 'month', 'stepmode': 'backward'},
{'count': 6, 'label': '6M', 'step': 'month', 'stepmode': 'backward'},
{'step': 'all'}])},
'rangeslider': {'visible': True}, 'type': 'date'})}
return figure
if __name__ == '__main__':
mainP.run_server(debug=True)
|
[
"noreply@github.com"
] |
Tvkqd.noreply@github.com
|
0652c6246afc94cc1e497a05839624bc3ae6d545
|
56fcab9393f0ec379e2abb00d2d8eda36f64e823
|
/uintah/kokkos_src_original/.svn/pristine/06/0652c6246afc94cc1e497a05839624bc3ae6d545.svn-base
|
b03266be673a189a9145f3f4ad00ba5776bedd11
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
damu1000/hypre_ep
|
4a13a5545ac90b231ca9e0f29f23f041f344afb9
|
a6701de3d455fa4ee95ac7d79608bffa3eb115ee
|
refs/heads/master
| 2023-04-11T11:38:21.157249
| 2021-08-16T21:50:44
| 2021-08-16T21:50:44
| 41,874,948
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,277
|
# -*- python -*-
# ex: set syntax=python:
# This is a sample buildmaster config file. It must be installed as
# 'master.cfg' in your buildmaster's base directory (although the filename
# can be changed with the --basedir option to 'mktap buildbot master').
# It has one job: define a dictionary named BuildmasterConfig. This
# dictionary has a variety of keys to control different aspects of the
# buildmaster. They are documented in docs/config.xhtml .
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
####### BUILDSLAVES
# the 'slaves' list defines the set of allowable buildslaves. Each element is
# a tuple of bot-name and bot-password. These correspond to values given to
# the buildslave's mktap invocation.
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("Lenny_64", "password"),
BuildSlave("Squeeze_64", "password",max_builds=1),
BuildSlave("Squeeze_32", "password",max_builds=1),
BuildSlave("Inferno", "password",max_builds=1),
BuildSlave("bigmac", "password",max_builds=1),
BuildSlave("GPU-Linux-64bit", "password",max_builds=1),
BuildSlave("Testing", "password",max_builds=1)]
# to limit to two concurrent builds on a slave, use
# c['slaves'] = [BuildSlave("bot1name", "bot1passwd", max_builds=2)]
# 'slavePortnum' defines the TCP port to listen on. This must match the value
# configured into the buildslaves (with their --master option)
c['slavePortnum'] = 9989
####### CHANGESOURCES
# the 'change_source' setting tells the buildmaster how it should find out
# about source code changes. Any class which implements IChangeSource can be
# put here: there are several in buildbot/changes/*.py to choose from.
#svn_poller = SVNPoller(svnurl=source_code_svn_url,
# pollinterval=1*60, # seconds -- every 1 minute
# split_file=split_file_branches)
from buildbot.changes.svnpoller import SVNPoller, split_file_branches
#from buildbot.changes.gitpoller import GitPoller
#source_code_svn_url='https://gforge.sci.utah.edu/svn/uintah/trunk/src'
source_code_svn_url='https://gforge.sci.utah.edu/svn/uintah/'
gpu_svn_url='https://gforge.sci.utah.edu/svn/uintah/branches/gpu_dev'
doc_svn_url='https://gforge.sci.utah.edu/svn/uintah/trunk/doc'
exprlib_git_url='git://software.crsim.utah.edu/ExprLib'
spatialops_git_url='git://software.crsim.utah.edu/SpatialOps'
svn_poller = SVNPoller(svnurl=source_code_svn_url,
pollinterval=1*60, # seconds -- every 1 minute
split_file=split_file_branches)
#svn_poller = SVNPoller(svnurl=source_code_svn_url,
# pollinterval=1*60, # seconds -- every 1 minute
# )
gpu_svn_poller = SVNPoller(svnurl=gpu_svn_url,
pollinterval=1*60, # seconds -- every 1 minute
)
doc_svn_poller = SVNPoller(svnurl=doc_svn_url,
pollinterval=10*60, # seconds -- every 10 minutes
)
#exprlib_git_poller = GitPoller(giturl=exprlib_git_url,
# pollinterval=1*60, # seconds -- every 1 minute
# )
#spatialops_git_poller = GitPoller(giturl=spatialops_git_url,
# pollinterval=1*60, # seconds -- every 1 minute
# )
c['change_source'] = [ svn_poller]
#c['change_source'] = [ svn_poller, doc_svn_poller , gpu_svn_poller]
#c['change_source'] = [ svn_poller, doc_svn_poller,exprlib_git_poller,
# spatialops_git_poller]
####### SCHEDULERS
## configure the Schedulers
from buildbot import scheduler
#testing = scheduler.Scheduler(name="testing-build", branch=None,
# treeStableTimer=1*60, # wait 1 minutes
# builderNames=["Testing-64bit"])
#gpu = scheduler.AnyBranchScheduler(name="gpu-build",
# branches=["branches/gpu_dev"],
# treeStableTimer=1*60, # wait 1 minutes
# builderNames=["GPU-64bit"])
gpu = scheduler.Scheduler(name="gpu-build",branch=None,
treeStableTimer=1*60, # wait 1 minutes
builderNames=["GPU-64bit"])
#gpu = scheduler.Scheduler(name="gpu-build", branch="branches/gpu_dev",
# treeStableTimer=1*60, # wait 1 minutes
# builderNames=["GPU-64bit"])
bigmac = scheduler.Scheduler(name="bigmac-build", branch=None,
treeStableTimer=1*60, # wait 1 minutes
builderNames=["BigMac-OSX"])
quick = scheduler.Scheduler(name="quick-build", branch=None,
treeStableTimer=1*60, # wait 1 minutes
builderNames=["Linux-Debug-64bit"])
#inferno = scheduler.Scheduler(name="inferno-build", branch=None,
# treeStableTimer=1*60, # wait 1 minutes
# builderNames=["inferno-linux"])
quick_32 = scheduler.Dependent(name="quick-build-32",
upstream=quick,
builderNames=["Linux-Debug-32bit-Static-Libraries"])
#quickWasatch = scheduler.Scheduler(name="quick-wasatch", branch=None,
# treeStableTimer=1*60, # wait 1 minutes
# builderNames=["Linux-dbg-static-Wasatch"])
#quickWasatch = scheduler.Periodic(name="quick-wasatch", branch=None,
# periodicBuildTimer=6*60*60, # build every 1 hour
# builderNames=["Linux-dbg-static-Wasatch"])
full = scheduler.Dependent(name="full-build & test",
upstream=quick_32,
builderNames=["Linux-Optimize-Test-64bit"])
#gpu_night = scheduler.Nightly(name='nightly',
# builderNames=['GPU-64bit-nightly'],
# branch="branches/gpu_dev",
# hour=1, minute=1)
gpu_night = scheduler.Nightly(name='nightly', branch=None,
builderNames=['GPU-64bit-nightly'],
hour=1, minute=1)
doc = scheduler.Scheduler(name="doc-build",branch=None,
treeStableTimer=1*60,
builderNames=["Documentation-Builder"])
c['schedulers'] = []
#c['schedulers'] = [ doc, quick, quick_32, full, night ]
c['schedulers'] = [ quick, quick_32, bigmac, full, doc ]
#c['schedulers'] = [ gpu, gpu_night,quick, quick_32, bigmac, full, doc,testing ]
##c['schedulers'] = [ doc, quick, quick_32, quickWasatch, full,testing,gpu,bigmac]
#c['schedulers'] = [ doc, quick, quick_32, full, night ]
####### BUILDERS
# the 'builders' list defines the Builders. Each one is configured with a
# dictionary, using the following keys:
# name (required): the name used to describe this builder
# slavename (required): which slave to use, must appear in c['bots']
# builddir (required): which subdirectory to run the builder in
# factory (required): a BuildFactory to define how the build is run
# periodicBuildTime (optional): if set, force a build every N seconds
# buildbot/process/factory.py provides several BuildFactory classes you can
# start with, which implement build processes for common targets (GNU
# autoconf projects, CPAN perl modules, etc). The factory.BuildFactory is the
# base class, and is configured with a series of BuildSteps. When the build
# is run, the appropriate buildslave is told to execute each Step in turn.
# the first BuildStep is typically responsible for obtaining a copy of the
# sources. There are source-obtaining Steps in buildbot/steps/source.py for
# CVS, SVN, and others.
from buildbot.process import factory
from buildbot.steps import source, shell, transfer
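# Illustrative only -- this factory is not wired into c['builders'] below,
# and the plain "make" command is an assumption rather than this project's
# build recipe; it just shows the checkout-then-compile pattern used throughout.
example_f = factory.BuildFactory()
example_f.addStep(source.SVN, baseURL=source_code_svn_url,
                  defaultBranch="trunk/src",
                  workdir='build/src', mode="update")
example_f.addStep(shell.Compile, command=["make"], workdir='build/src')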
compiler_env = {'CC' : '/usr/lib/ccache/gcc',
'CXX' : '/usr/lib/ccache/g++',
'F77' : 'gfortran',
'SCI_MAKE_BE_QUIET' : 'true',
'LANG' : ''
}
compiler_env_old = {'CC' : 'gcc',
'CXX' : 'g++',
'F77' : 'gfortran',
'SCI_MAKE_BE_QUIET' : 'true',
'LANG' : ''
}
gpu_compiler_env = {'CC' : 'gcc',
'CXX' : 'g++',
'F77' : 'gfortran',
'SCI_MAKE_BE_QUIET' : 'true',
'LANG' : ''
}
bigmac_compiler_env = {'CC' : 'gcc',
'CXX' : 'g++',
'F77' : '/usr/local/bin/gfortran',
'SCI_MAKE_BE_QUIET' : 'true',
'LANG' : ''
}
bigmac_compiler_env_ = {'CC' : 'gcc',
'CXX' : 'g++',
'SCI_MAKE_BE_QUIET' : 'true',
'LANG' : ''
}
compiler_env_32 = {'CC' : '/usr/lib/ccache/gcc',
'CXX' : '/usr/lib/ccache/g++',
'F77' : 'gfortran-4.4',
'SCI_MAKE_BE_QUIET' : 'true',
'LANG' : ''
}
inferno_env = {'SCI_MAKE_BE_QUIET' : 'true',
'LANG' : ''
}
rt_env = {'GOLD_STANDARD': '/usr/local/TestData'}
dbg_configure = ["../src/configure", "--enable-debug", "--enable-sci-malloc"
]
dbg_configure_command = dbg_configure + ["--enable-64bit"]
dbg_configure_command_32 = dbg_configure + ["--enable-static"]
gpu_configure_command = ["../src/configure",
"--build=x86_64-linux-gnu",
"--host=x86_64-linux-gnu",
"--target=x86_64-linux-gnu",
"--with-hypre=/usr/local/hypre",
"--with-boost=/usr/local/boost",
"--with-petsc=/usr/local/petsc",
"--with-mpi=/usr/local/openmpi",
"--with-cuda=/usr/local/cuda",
# "--with-cuda-sdk=/usr/local/NVIDIA_GPU_Computing_SDK",
"--enable-threads=pthreads",
"--enable-optimize=-O3 -mfpmath=sse",
"--enable-64bit",
"USE_WASATCH=yes" ,
"--enable-wasatch_3p"]
test_gpu_configure_command = ["../src/configure",
"--with-hypre=/usr/local/hypre",
"--with-petsc=/usr/local/petsc",
"--with-mpi=/usr/local/openmpi",
"--with-cuda=/usr/local/cuda",
# "--with-cuda-sdk=/usr/local/NVIDIA_GPU_Computing_SDK",
"--enable-optimize"]
bigmac_configure_command = ["../src/configure",
"--with-mpi-include=/opt/local/include/openmpi",
"--with-mpi-lib=/opt/local/lib" ,
"--with-boost=/opt/local" ,
"USE_MPM=yes USE_ICE=yes USE_MPMICE=yes USE_ARCHES=yes" ,
"USE_WASATCH=yes" ,
"--enable-wasatch_3p" ,
"--with-petsc=/usr/local/petsc-2.3.3" ,
"PETSC_ARCH=darwin10.8.0-c-opt" ,
"--with-hypre=/usr/local/hypre-2.7.0b" ,
"--enable-optimize=-O3"]
wasatch_configure_command = ["../src/configure",
"--enable-debug",
"--enable-64bit",
"--with-boost=/usr",
"--enable-wasatch_3p",
"--enable-static",
"--without-fortran",
"USE_MPM=no USE_ICE=no USE_MPMICE=no USE_ARCHES=no USE_MODELS_RADIATION=no",
"USE_WASATCH=yes" ]
opt_configure_command = ["../src/configure",
"--enable-optimize=-O3 -mfpmath=sse",
"--enable-64bit", "--enable-assertion-level=0",
# "--disable-sci-malloc", "--enable-static"
"--disable-sci-malloc"
]
wasatch_configure = ["--enable-wasatch_3p",
"--with-boost=/usr",
"USE_WASATCH=yes"
]
opt_configure_command = opt_configure_command + wasatch_configure
malloc_trace = ["--with-MallocTrace=/usr/local/MallocTrace"]
opt_nightly_configure_command = opt_configure_command + malloc_trace
dbg_nightly_configure_command = dbg_configure_command + wasatch_configure
rt_command = ["../../src/scripts/regression_tester", "-exact", "-restart", "-nice"]
def AddTestStep(fac,test_case,build_type):
fac.addStep(shell.ShellCommand,
description=["Running " + test_case.upper() + " " + build_type + " tests"],
command=rt_command + ["-" + test_case, "-" + build_type],
env=rt_env,
workdir= 'build/'+build_type+'/StandAlone',
warnOnWarnings=True,
timeout=60*60, # timeout after 1 hour
name=test_case + "_test")
tests=["wasatch","arches","examples","models","ice","ice_amr","mpm","mpmice","mpmice_amr","impm","mpmarches"]
#tests=["mpmarches"]
#RT_tests = ["ARCHES", "Examples","ICE", "IMPM", "Models", "MPMARCHES", "MPMF", "MPMICE", "MPM", "UCF", "Wasatch"]
RT_tests = ["ARCHES", "Examples","ICE", "IMPM", "Models", "MPMICE", "MPM", "UCF", "Wasatch"]
def AddTestStepRT(fac,test_case,build_type):
RT_env = {'TEST_COMPONENTS': test_case ,
'WHICH_TESTS': 'NIGHTLYTESTS'}
fac.addStep(shell.ShellCommand,
description=["Running " + test_case.upper() + " " + build_type + " tests"],
command=["make", "runLocalRT"],
env=RT_env,
workdir= 'build/' + build_type,
warnOnWarnings=True,
timeout=60*60, # timeout after 1 hour
name=test_case + "_test")
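# For example, AddTestStepRT(optimize_f, "ICE", "opt") adds a step that runs
# "make runLocalRT" in build/opt with TEST_COMPONENTS=ICE; see the loops over
# RT_tests further down.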
doc_f = factory.BuildFactory()
#doc_f.addStep(source.SVN, svnurl=doc_svn_url,directory='build/doc',
# mode="update",retry=(10,2))
doc_f.addStep(source.SVN, baseURL=source_code_svn_url,
defaultBranch="trunk/doc",
workdir='build/doc',
mode="update",retry=(10,2))
doc_f.addStep(shell.ShellCommand,
description=["Building documentation"],
command=["./runLatex"],
workdir='build/doc',
warnOnWarnings=False,
warnOnFailure=False,
name="build documentation")
debug_f = factory.BuildFactory()
#debug_f.addStep(source.SVN, svnurl=source_code_svn_url,directory='build/src',
# mode="update",retry=(10,2))
debug_f.addStep(source.SVN, baseURL=source_code_svn_url,
defaultBranch="trunk/src",
workdir='build/src',
mode="update",retry=(10,2))
debug_f.addStep(shell.Configure,
command=dbg_configure_command,
env=compiler_env,
workdir='build/dbg')
debug_f.addStep(shell.Compile,
command=["python", "../src/scripts/make.py", " 4"],
env=compiler_env,
workdir='build/dbg')
gpu_f = factory.BuildFactory()
#gpu_f.addStep(source.SVN, svnurl=gpu_svn_url,directory='build',
# mode="update",retry=(10,2))
#gpu_f.addStep(source.SVN, baseURL=source_code_svn_url,
# defaultBranch="branches/gpu_dev",workdir='build',
# mode="update",retry=(10,2))
gpu_f.addStep(source.SVN, baseURL=source_code_svn_url,
defaultBranch="trunk/src",workdir='build/src',
mode="update",retry=(10,2))
gpu_f.addStep(shell.Configure,
command=gpu_configure_command,
env=gpu_compiler_env,
workdir='build/opt')
gpu_f.addStep(shell.Compile,
command=["python", "../src/scripts/make.py", " 2"],
env=gpu_compiler_env,
workdir='build/opt')
gpu_f.addStep(shell.ShellCommand,
command=["../src/scripts/make_test_data_link.sh", "/home/csafe-tester/Linux/TestData/opt"],
env=compiler_env,
workdir='build/opt')
AddTestStepRT(gpu_f,"GPU","opt")
# gpu_f.addStep(shell.ShellCommand,
# command=["../src/scripts/make_test_data_link.sh", "/home/csafe-tester/Linux/TestData/opt"],
# env=compiler_env,
# workdir='build/opt')
# for test in RT_tests:
# AddTestStepRT(gpu_f,test,"opt")
gpu_nightly_f = factory.BuildFactory()
#gpu_f.addStep(source.SVN, svnurl=gpu_svn_url,directory='build',
# mode="update",retry=(10,2))
#gpu_nightly_f.addStep(source.SVN, baseURL=source_code_svn_url,
# defaultBranch="branches/gpu_dev",workdir='build',
# defaultBranch="trunk/src",workdir='build',
# mode="update",retry=(10,2))
gpu_nightly_f.addStep(source.SVN, baseURL=source_code_svn_url,
defaultBranch="trunk/src",workdir='build/src',
mode="update",retry=(10,2))
gpu_nightly_f.addStep(shell.Configure,
command=gpu_configure_command,
env=gpu_compiler_env,
workdir='build/opt')
gpu_nightly_f.addStep(shell.Compile,
command=["python", "../src/scripts/make.py", " 4"],
env=gpu_compiler_env,
workdir='build/opt')
gpu_nightly_f.addStep(shell.ShellCommand,
command=["../src/scripts/make_test_data_link.sh", "/home/csafe-tester/Linux/TestData/opt"],
env=compiler_env,
workdir='build/opt')
GPU_tests = RT_tests + ["GPU"]
for test in GPU_tests:
AddTestStepRT(gpu_nightly_f,test,"opt")
# AddTestStepRT(gpu_nightly_f,"Examples","opt")
# AddTestStepRT(gpu_nightly_f,"ICE","opt")
bigmac_f = factory.BuildFactory()
#bigmac_f.addStep(source.SVN, svnurl=source_code_svn_url,directory='build/src',
# mode="update",retry=(10,2))
bigmac_f.addStep(source.SVN, baseURL=source_code_svn_url,
defaultBranch="trunk/src",
workdir='build/src',
mode="update",retry=(10,2))
bigmac_f.addStep(shell.ShellCommand,
command=["rm","-rf","CCA",";","rm","-rf","Core"],
env=bigmac_compiler_env,
workdir='build/dbg')
bigmac_f.addStep(shell.Configure,
command=bigmac_configure_command,
env=bigmac_compiler_env,
workdir='build/dbg')
bigmac_f.addStep(shell.ShellCommand,
command=["../src/scripts/mac_zlib_fix"],
env=bigmac_compiler_env,
workdir='build/dbg')
bigmac_f.addStep(shell.Compile,
command=["python", "../src/scripts/make.py", " 10"],
env=bigmac_compiler_env,
workdir='build/dbg')
inferno_f = factory.BuildFactory()
#inferno_f.addStep(source.SVN, svnurl=source_code_svn_url,
# directory='build/src', mode="update",retry=(10,2))
inferno_f.addStep(source.SVN, baseURL=source_code_svn_url,
defaultBranch="trunk/src",
workdir='build/src', mode="update",retry=(10,2))
inferno_f.addStep(shell.Configure,
command=dbg_configure,
env=inferno_env,
workdir='build/dbg')
inferno_f.addStep(shell.Compile,
command=["../src/scripts/pump_make.sh"],
env=inferno_env,
workdir='build/dbg')
debug_f_32 = factory.BuildFactory()
#debug_f_32.addStep(source.SVN, svnurl=source_code_svn_url,
# directory='build/src', mode="update",retry=(10,2))
debug_f_32.addStep(source.SVN, baseURL=source_code_svn_url,
defaultBranch="trunk/src",
workdir='build/src', mode="update",retry=(10,2))
debug_f_32.addStep(shell.Configure,
command=dbg_configure_command_32,
env=compiler_env_32,
workdir='build/dbg')
debug_f_32.addStep(shell.Compile,
command=["python", "../src/scripts/make.py", " 4"],
env=compiler_env_32,
workdir='build/dbg')
#wasatchOnly_f = factory.BuildFactory()
#wasatchOnly_f.addStep(source.SVN, svnurl=source_code_svn_url,
# directory='build/src', mode="update",retry=(10,2))
#wasatchOnly_f.addStep(source.SVN, baseURL=source_code_svn_url,
# defaultBranch="trunk/src",
# workdir='build/src', mode="update",retry=(10,2))
#wasatchOnly_f.addStep(shell.Configure,
# command=wasatch_configure_command,
# env=compiler_env,
# workdir='build/dbg')
#wasatchOnly_f.addStep(shell.Compile,
# command=["python", "../src/scripts/make.py", " 4"],
# env=compiler_env,
# workdir='build/dbg')
optimize_f = factory.BuildFactory()
#optimize_f.addStep(source.SVN, svnurl=source_code_svn_url,
# directory='build/src', mode="update",retry=(10,2))
optimize_f.addStep(source.SVN, baseURL=source_code_svn_url,
defaultBranch="trunk/src",
workdir='build/src', mode="update",retry=(10,2))
optimize_f.addStep(shell.Configure,
env=compiler_env,
workdir='build/opt',
command=opt_configure_command)
optimize_f.addStep(shell.Compile,
command=["python", "../src/scripts/make.py", " 4"],
env=compiler_env,
workdir='build/opt')
optimize_f.addStep(shell.ShellCommand,
command=["../src/scripts/make_test_data_link.sh", " /usr/local/TestData/opt"],
env=compiler_env,
workdir='build/opt')
#RT_env = {'TEST_COMPONENTS': 'ARCHES Examples ICE IMPM Models MPMARCHES MPMF MPMICE MPM UCF Wasatch',
# 'WHICH_TESTS': 'nightly'}
for test in RT_tests:
AddTestStepRT(optimize_f,test,"opt")
# optimize_f.addStep(shell.ShellCommand,
# command=["python", "../../src/scripts/buildbot_testresults.py",
# test + "-results"],
# env=compiler_env,
# workdir='build/opt/local_RT')
#optimize_f.addStep(shell.ShellCommand,
# command=["make", "runLocalRT"],
# env=RT_env,
# workdir='build/opt')
#optimize_f.addStep(shell.ShellCommand,
# command=["cat", "local_RT/log"],
# env=RT_env,
# workdir='build/opt')
import os, shutil, glob
from buildbot.process.properties import WithProperties
class TransferTestResults(transfer.DirectoryUpload):
def __init__(self,extradir=None,**kwargs):
self.extradir=extradir
transfer.DirectoryUpload.__init__(self,**kwargs)
def finished(self, result):
bname = self.getProperty("buildername")
bnum = self.getProperty("buildnumber")
url = "http://www.uintah.utah.edu:8010/TestResults/"+ bname+"/"+ str(bnum) +"/"
        print self.extradir
        if self.extradir is not None:
url = url + self.extradir
self.addURL("TestResults", url)
result = transfer.DirectoryUpload.finished(self, result)
self.step_status.setText(["uploaded results"])
return result
#optimize_f.addStep(TransferTestResults(workdir='build/opt/StandAlone',
# slavesrc="TestResults",
# masterdest=WithProperties("~/master/public_html/TestResults/%(buildername)s/%(buildnumber)s/"))
# )
night_f = factory.BuildFactory()
#night_f.addStep(source.SVN, svnurl=source_code_svn_url, directory='build/src',
# mode="update",retry=(10,2))
night_f.addStep(source.SVN, baseURL=source_code_svn_url,
defaultBranch="trunk/src",
workdir='build/src',
mode="update",retry=(10,2))
night_f.addStep(shell.Configure,
command=dbg_nightly_configure_command,
env=compiler_env,
workdir='build/dbg')
night_f.addStep(shell.Compile,
command=["python", "../src/scripts/make.py", " 4"],
env=compiler_env,
workdir='build/dbg')
for test in tests:
AddTestStep(night_f,test,"dbg")
night_f.addStep(TransferTestResults(workdir='build/dbg/StandAlone',
slavesrc="TestResults",
masterdest=WithProperties("~/master/public_html/TestResults/%(buildername)s/%(buildnumber)s/dbg/"),
extradir="dbg")
)
night_f.addStep(shell.Configure,
command=opt_configure_command,
env=compiler_env,
workdir='build/opt')
night_f.addStep(shell.Compile,
command=["python", "../src/scripts/make.py", " 4"],
env=compiler_env,
workdir='build/opt')
for test in tests:
AddTestStep(night_f,test,"opt")
night_f.addStep(TransferTestResults(workdir='build/opt/StandAlone',
slavesrc="TestResults",
masterdest=WithProperties("~/master/public_html/TestResults/%(buildername)s/%(buildnumber)s/opt/"),
extradir="opt")
)
#### BUILDERS #####
testing = {'name': "Testing-64bit",
'slavename': "Testing",
'builddir': "testing_quick",
'factory': debug_f,
}
gpu = {'name': "GPU-64bit",
'slavename': "GPU-Linux-64bit",
'builddir': "gpu_quick",
'factory': gpu_f,
}
gpu_nightly = {'name': "GPU-64bit-nightly",
'slavename': "GPU-Linux-64bit",
'builddir': "gpu_nightly",
'factory': gpu_nightly_f,
}
bigmac = {'name': "BigMac-OSX",
'slavename': "bigmac",
'builddir': "bigmac_quick",
'factory': bigmac_f,
}
doc_squeeze = {'name': "Documentation-Builder",
'slavenames':[ "Squeeze_64","Inferno"],
# 'slavename': "Squeeze_64",
'builddir': "uintah_doc",
'factory': doc_f,
}
#ql_squeeze_wasatch = {'name': "Linux-dbg-static-Wasatch",
# 'slavename': "Squeeze_64",
# 'builddir': "wasatch_quick",
# 'factory': wasatchOnly_f,
# }
ql_squeeze = {'name': "Linux-Debug-64bit",
'slavename': "Squeeze_64",
'builddir': "uintah_quick",
'factory': debug_f,
}
inferno_builder = {'name': "inferno-linux",
'slavename': "Inferno",
'builddir': "uintah_inferno",
'factory': inferno_f,
}
ql_squeeze_32 = {'name': "Linux-Debug-32bit-Static-Libraries",
# 'slavename': "Squeeze_32",
'slavenames': ["Squeeze_32","Inferno"],
'builddir': "uintah_quick_32",
'factory': debug_f_32,
}
fl_squeeze = {'name': "Linux-Optimize-Test-64bit",
'slavename': "Squeeze_64",
'builddir': "uintah_full",
'factory': optimize_f,
}
nl_squeeze = {'name': "Linux-Debug-Optimize-Test-64bit",
'slavename': "Squeeze_64",
'builddir': "uintah_nightly",
'factory': night_f,
}
c['builders'] = [doc_squeeze, ql_squeeze, ql_squeeze_32, fl_squeeze,bigmac]
#c['builders'] = [doc_squeeze, ql_squeeze, ql_squeeze_wasatch, ql_squeeze_32, inferno_builder, fl_squeeze, nl_squeeze]
#c['builders'] = [doc_squeeze, ql_squeeze, ql_squeeze_32, fl_squeeze, nl_squeeze]
####### STATUS TARGETS
# 'status' is a list of Status Targets. The results of each build will be
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
# including web pages, email senders, and IRC bots.
c['status'] = []
from buildbot.status import html
c['status'].append(html.WebStatus(http_port="8010",allowForce=True))
from buildbot.status import mail
c['status'].append(mail.MailNotifier(fromaddr="uintah-developer@gforge.sci.utah.edu",
extraRecipients=["uintah-developer@gforge.sci.utah.edu"],
sendToInterestedUsers=True,
lookup="sci.utah.edu",
mode="failing"))
####### DEBUGGING OPTIONS
# if you set 'debugPassword', then you can connect to the buildmaster with
# the diagnostic tool in contrib/debugclient.py . From this tool, you can
# manually force builds and inject changes, which may be useful for testing
# your buildmaster without actually committing changes to your repository (or
# before you have a functioning 'sources' set up). The debug tool uses the
# same port number as the slaves do: 'slavePortnum'.
c['debugPassword'] = "debugpassword"
# if you set 'manhole', you can ssh into the buildmaster and get an
# interactive python shell, which may be useful for debugging buildbot
# internals. It is probably only useful for buildbot developers. You can also
# use an authorized_keys file, or plain telnet.
#from buildbot import manhole
#c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
# "admin", "password")
####### PROJECT IDENTITY
# the 'projectName' string will be used to describe the project that this
# buildbot is working on. For example, it is used as the title of the
# waterfall HTML page. The 'projectURL' string will be used to provide a link
# from buildbot HTML pages to your project's home page.
c['projectName'] = "Uintah"
c['projectURL'] = "http://www.uintah.utah.edu/"
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.Waterfall page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = "http://www.uintah.utah.edu:8010/"
|
[
"damodars@sci.utah.edu"
] |
damodars@sci.utah.edu
|
|
0424f15c39322d5a2be3e56ef253864e1b0daebc
|
ba7feebada17c625167e6c76486abfee6485a4e5
|
/updateDB.py
|
c89629fc9bce493ecd8a377d261b573d37aaa2af
|
[] |
no_license
|
MetalChief/PySqlite
|
6b7889474b59f1d56839e2913c0d1303900bb8dc
|
7658709e0ebeccc7bb5c8af8edabad7778303e50
|
refs/heads/master
| 2020-12-24T18:51:29.326384
| 2016-04-11T17:55:57
| 2016-04-11T17:55:57
| 55,994,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
import sqlite3
conn = sqlite3.connect('test.db')
print("Opened database successfully")
conn.execute("UPDATE COMPANY set SALARY = 25000.00 where ID=1")
conn.commit()
print("Total number of rows updated :", conn.total_changes)
cursor = conn.execute("SELECT id, name, address, salary from COMPANY")
for row in cursor:
print("ID = ", row[0])
print("NAME = ", row[1])
print("ADDRESS = ", row[2])
print("SALARY = ", row[3], "\n")
print("Operation done successfully")
conn.close()
|
[
"robert.parrish@emc.com"
] |
robert.parrish@emc.com
|
c1d6a62788febfe17ebcb9c3c93d8704bf333be0
|
89b7eb25e742af4457e73fc9d3e04a111d701032
|
/wals_ml_engine/trainer/wals.py
|
ef0d5ec156ed4bc9859f8f55619896cc6bd95524
|
[] |
no_license
|
jkaartoluoma/recommender
|
43fff3c87ca4eb1c74ef2a6f3c5ddc0495917232
|
270208c2ee0bc12f6c7f3dccccfc17b6b26ee669
|
refs/heads/master
| 2020-04-21T18:13:41.362431
| 2018-12-11T05:45:39
| 2018-12-11T05:45:39
| 169,761,537
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,791
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WALS model core functions."""
import math
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization.python.ops import factorization_ops
def get_rmse(output_row, output_col, actual):
"""Compute rmse between predicted and actual ratings.
Args:
output_row: evaluated numpy array of row_factor
output_col: evaluated numpy array of col_factor
actual: coo_matrix of actual (test) values
Returns:
rmse
"""
mse = 0
for i in xrange(actual.data.shape[0]):
row_pred = output_row[actual.row[i]]
col_pred = output_col[actual.col[i]]
err = actual.data[i] - np.dot(row_pred, col_pred)
mse += err * err
mse /= actual.data.shape[0]
rmse = math.sqrt(mse)
return rmse
def simple_train(model, input_tensor, num_iterations):
"""Helper function to train model on input for num_iterations.
Args:
model: WALSModel instance
input_tensor: SparseTensor for input ratings matrix
num_iterations: number of row/column updates to run
Returns:
tensorflow session, for evaluating results
"""
sess = tf.Session(graph=input_tensor.graph)
with input_tensor.graph.as_default():
row_update_op = model.update_row_factors(sp_input=input_tensor)[1]
col_update_op = model.update_col_factors(sp_input=input_tensor)[1]
sess.run(model.initialize_op)
sess.run(model.worker_init)
for _ in xrange(num_iterations):
sess.run(model.row_update_prep_gramian_op)
sess.run(model.initialize_row_update_op)
sess.run(row_update_op)
sess.run(model.col_update_prep_gramian_op)
sess.run(model.initialize_col_update_op)
sess.run(col_update_op)
return sess
LOG_RATINGS = 0
LINEAR_RATINGS = 1
LINEAR_OBS_W = 100.0
def make_wts(data, wt_type, obs_wt, feature_wt_exp, axis):
"""Generate observed item weights.
Args:
data: coo_matrix of ratings data
wt_type: weight type, LOG_RATINGS or LINEAR_RATINGS
obs_wt: linear weight factor
feature_wt_exp: logarithmic weight factor
axis: axis to make weights for, 1=rows/users, 0=cols/items
Returns:
vector of weights for cols (items) or rows (users)
"""
  # reciprocal of the count of observed entries along the given axis
  # (per column when axis=0, per row when axis=1)
frac = np.array(1.0/(data > 0.0).sum(axis))
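  # e.g. an item (column) rated by 4 users gives frac = 0.25; with
  # LINEAR_RATINGS and obs_wt = 100.0 its weight below becomes 25.0.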
# filter any invalid entries
frac[np.ma.masked_invalid(frac).mask] = 0.0
# normalize weights according to assumed distribution of ratings
if wt_type == LOG_RATINGS:
wts = np.array(np.power(frac, feature_wt_exp)).flatten()
else:
wts = np.array(obs_wt * frac).flatten()
# check again for any numerically unstable entries
assert np.isfinite(wts).sum() == wts.shape[0]
return wts
def wals_model(data, dim, reg, unobs, weights=False,
wt_type=LINEAR_RATINGS, feature_wt_exp=None,
obs_wt=LINEAR_OBS_W):
"""Create the WALSModel and input, row and col factor tensors.
Args:
data: scipy coo_matrix of item ratings
dim: number of latent factors
reg: regularization constant
unobs: unobserved item weight
weights: True: set obs weights, False: obs weights = unobs weights
wt_type: feature weight type: linear (0) or log (1)
feature_wt_exp: feature weight exponent constant
obs_wt: feature weight linear factor constant
Returns:
input_tensor: tensor holding the input ratings matrix
row_factor: tensor for row_factor
col_factor: tensor for col_factor
model: WALSModel instance
"""
row_wts = None
col_wts = None
num_rows = data.shape[0]
num_cols = data.shape[1]
if weights:
assert feature_wt_exp is not None
row_wts = np.ones(num_rows)
col_wts = make_wts(data, wt_type, obs_wt, feature_wt_exp, 0)
row_factor = None
col_factor = None
with tf.Graph().as_default():
input_tensor = tf.SparseTensor(indices=zip(data.row, data.col),
values=(data.data).astype(np.float32),
dense_shape=data.shape)
model = factorization_ops.WALSModel(num_rows, num_cols, dim,
unobserved_weight=unobs,
regularization=reg,
row_weights=row_wts,
col_weights=col_wts)
# retrieve the row and column factors
row_factor = model.row_factors[0]
col_factor = model.col_factors[0]
return input_tensor, row_factor, col_factor, model
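# Usage sketch, added for illustration -- the tiny ratings matrix and the
# hyperparameters below are assumptions, not values from this project:
if __name__ == '__main__':
  from scipy.sparse import coo_matrix
  ratings = coo_matrix((np.array([4.0, 5.0, 3.0], dtype=np.float32),
                        (np.array([0, 1, 2]), np.array([1, 0, 2]))),
                       shape=(3, 3))
  input_tensor, row_factor, col_factor, model = wals_model(
      ratings, dim=2, reg=0.01, unobs=0.1)
  session = simple_train(model, input_tensor, num_iterations=5)
  output_row = row_factor.eval(session=session)
  output_col = col_factor.eval(session=session)
  print('train RMSE: %.4f' % get_rmse(output_row, output_col, ratings))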
|
[
"toni.linnusmaki@power.fi"
] |
toni.linnusmaki@power.fi
|
687d0f7a3232b834f7ae60720c06910ae23fd499
|
14c99d4ab2307b863053f0125be7a5efe6d79247
|
/check_new_version.py
|
60c3f6d0b0232d8ea7207a45187d30b551ac4753
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
rpmfusion/lpf-spotify-client
|
841c9c9ccfd1067fb082caa47d140a6770294b3c
|
c32c9fe09fc201ba9acaae04074ea66d0aa037cb
|
refs/heads/master
| 2023-06-26T03:02:58.172016
| 2023-06-20T10:59:38
| 2023-06-20T10:59:38
| 44,868,080
| 32
| 15
|
NOASSERTION
| 2023-06-20T12:36:33
| 2015-10-24T13:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,470
|
py
|
#!/usr/bin/python3
""" Warning not complete """
import requests
import re
import os
import subprocess
import sys
def runme(cmd, env, cwd='.'):
"""Simple function to run a command and return 0 for success, 1 for
failure. cmd is a list of the command and arguments, action is a
name for the action (for logging), pkg is the name of the package
being operated on, env is the environment dict, and cwd is where
the script should be executed from."""
try:
subprocess.check_call(cmd, env=env, cwd=cwd, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
sys.stderr.write('%s failed: %s\n' % (cmd, e))
return 1
return 0
text = "cat spotify-client.spec.in | grep ^Version"
texts = text.split('|')
text0 = texts[0].strip().split(' ')
#print(text0)
text1 = texts[1].strip().split(' ')
#print(text1)
ps1 = subprocess.run(text0, check=True, capture_output=True)
ps2 = subprocess.run(text1, input=ps1.stdout, capture_output=True)
print("Current %s" % ps2.stdout.decode())
html = requests.get('http://repository.spotify.com/pool/non-free/s/spotify-client/')
#print (html.text)
str_mx = re.compile('href="(spotify-client.*?i386.deb)"')
str_mx2 = re.compile('href="(spotify-client.*?amd64.deb)"')
res = str_mx.findall(html.text)
res2 = str_mx2.findall(html.text)
deb32 = res[-1]
deb64 = res2[-1]
regexp = re.compile(r'spotify-client_(\d{1,2}[.]\d{1,2}[.]\d{1,3}[.]\d{1,3})([.].*)')
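# e.g. an illustrative filename 'spotify-client_1.2.31.1205.gcd26bde2_amd64.deb'
# yields version '1.2.31.1205' and minor '.gcd26bde2_amd64.deb'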
(version64, minor64) = regexp.findall(deb64)[0]
print ("deb64 = %s\nVersions: %s %s" % (deb64, version64, minor64))
print ("Latest Version: %s" % version64)
(version32, minor32) = regexp.findall(deb32)[0]
#print ("deb32 = %s\nVersions: %s %s\n" % (deb32, version32, minor32))
print ("Latest deb32 Version: %s \n" % version32)
spec = open('spotify-client.spec.in').read()
#print (spec)
#str_mx3 = re.compile('(Version:\s*) .*')
#spec2 = re.sub(str_mx3, r'\1 %s' % version64, spec)
str_mx4 = re.compile('(Source1:.*?)[.].*')
spec3 = re.sub(str_mx4, r'\1%s' % minor64, spec)
str_mx5 = re.compile('(Source2:.*?/).*')
spec4 = re.sub(str_mx5, r'\1%s' % deb32, spec3)
if spec != spec4:  # compare against the fully substituted spec that gets written
open('spotify-client.spec.in', 'w').write(spec4)
enviro = os.environ
pkgcmd = ['rpmdev-bumpspec', '-n', version64, '-c', 'Update to %s%s' % (version64, minor64[:10]),
'spotify-client.spec.in']
#pkgcmd = ['rpmdev-bumpspec -n %s -c "Update to %s%s" spotify-client.spec.in' % (version64, version64, minor64[:4])]
if runme(pkgcmd, enviro):
print('error running runme')
pkgcmd = ['rpmdev-bumpspec', '-n', version64, '-c', 'Update to %s%s' % (version64, minor64[:10]),
'lpf-spotify-client.spec'] # 2>/dev/null
if runme(pkgcmd, enviro):
print('error running runme')
print("New version available! ACTION REQUIRED !!!")
print('rfpkg mockbuild -N --default-mock-resultdir --root fedora-38-x86_64-rpmfusion_nonfree')
else:
print("Already updated !")
print('rfpkg ci -c && git show && echo Press enter to push and build; read dummy; rfpkg push && rfpkg build --nowait')
print('git checkout f38 && git merge master && git push && rfpkg build --nowait; git checkout master')
print('git checkout f37 && git merge master && git push && rfpkg build --nowait; git checkout master')
print('git checkout el9 && git merge master && git push && rfpkg build --nowait; git checkout master')
print('git checkout el8 && git merge master && git push && rfpkg build --nowait; git checkout master')
|
[
"sergio@serjux.com"
] |
sergio@serjux.com
|
477669cb5cb0efaf66edaa6ff6aca74521337e3c
|
2505145013cc68207006e45ee9f630f0ac17f86c
|
/打卡/water_catch/Largest_Rectangle_in_Histogram.py
|
0e597db9c6945e7dd324bcb84206cde601961316
|
[] |
no_license
|
pppppwj/leetcode
|
5fb52b278e31780c4ebb05b00ac5a0e01afd97a0
|
dfefac07a6180a5464723e0a7130b75f17dba7ab
|
refs/heads/master
| 2022-11-22T14:04:15.179261
| 2020-07-23T14:01:27
| 2020-07-23T14:01:27
| 275,056,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 867
|
py
|
from typing import List
class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
        # Maintain a non-decreasing stack of bar indices; the sentinel -1
        # keeps stack[-1] valid as the left boundary when a bar is popped.
stack = [-1]
res = 0 # return value
for i in range(len(heights)):
while stack[-1] != -1 and heights[i] < heights[stack[-1]]:
index = stack.pop()
                res = max(res, heights[index] * (i - stack[-1] - 1))
stack.append(i)
while stack[-1] != -1:
index = stack.pop()
            res = max(res, heights[index] * (len(heights) - stack[-1] - 1))
return res
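# Quick check with the classic example input (added for illustration): the
# largest rectangle in [2, 1, 5, 6, 2, 3] spans heights 5 and 6, area 10.
if __name__ == "__main__":
    print(Solution().largestRectangleArea([2, 1, 5, 6, 2, 3]))  # expected: 10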
|
[
"pppppwj424@gmail.com"
] |
pppppwj424@gmail.com
|
bf46c1b0edefb0117ae1d4116bf7907adfc36c0a
|
fe3265b72e691c6df8ecd936c25b6d48ac33b59a
|
/tests/components/zwave_js/test_trigger.py
|
9ba0080667408c3a9c9a78c166227f89fa8e8673
|
[
"Apache-2.0"
] |
permissive
|
bdraco/home-assistant
|
dcaf76c0967783a08eec30ce704e5e9603a2f0ca
|
bfa315be51371a1b63e04342a0b275a57ae148bd
|
refs/heads/dev
| 2023-08-16T10:39:15.479821
| 2023-02-21T22:38:50
| 2023-02-21T22:38:50
| 218,684,806
| 13
| 7
|
Apache-2.0
| 2023-02-21T23:40:57
| 2019-10-31T04:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 37,326
|
py
|
"""The tests for Z-Wave JS automation triggers."""
from unittest.mock import AsyncMock, patch
import pytest
import voluptuous as vol
from zwave_js_server.const import CommandClass
from zwave_js_server.event import Event
from zwave_js_server.model.node import Node
from homeassistant.components import automation
from homeassistant.components.zwave_js import DOMAIN
from homeassistant.components.zwave_js.helpers import get_device_id
from homeassistant.components.zwave_js.trigger import async_validate_trigger_config
from homeassistant.components.zwave_js.triggers.trigger_helpers import (
async_bypass_dynamic_config_validation,
)
from homeassistant.const import SERVICE_RELOAD
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import async_get as async_get_dev_reg
from homeassistant.setup import async_setup_component
from .common import SCHLAGE_BE469_LOCK_ENTITY
from tests.common import async_capture_events
async def test_zwave_js_value_updated(
hass: HomeAssistant, client, lock_schlage_be469, integration
) -> None:
"""Test for zwave_js.value_updated automation trigger."""
trigger_type = f"{DOMAIN}.value_updated"
node: Node = lock_schlage_be469
dev_reg = async_get_dev_reg(hass)
device = dev_reg.async_get_device(
{get_device_id(client.driver, lock_schlage_be469)}
)
assert device
no_value_filter = async_capture_events(hass, "no_value_filter")
single_from_value_filter = async_capture_events(hass, "single_from_value_filter")
multiple_from_value_filters = async_capture_events(
hass, "multiple_from_value_filters"
)
from_and_to_value_filters = async_capture_events(hass, "from_and_to_value_filters")
different_value = async_capture_events(hass, "different_value")
def clear_events():
"""Clear all events in the event list."""
no_value_filter.clear()
single_from_value_filter.clear()
multiple_from_value_filters.clear()
from_and_to_value_filters.clear()
different_value.clear()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# no value filter
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
},
"action": {
"event": "no_value_filter",
},
},
# single from value filter
{
"trigger": {
"platform": trigger_type,
"device_id": device.id,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
"from": "ajar",
},
"action": {
"event": "single_from_value_filter",
},
},
# multiple from value filters
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
"from": ["closed", "opened"],
},
"action": {
"event": "multiple_from_value_filters",
},
},
# from and to value filters
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
"from": ["closed", "opened"],
"to": ["opened"],
},
"action": {
"event": "from_and_to_value_filters",
},
},
# different value
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "boltStatus",
},
"action": {
"event": "different_value",
},
},
]
},
)
# Test that no value filter is triggered
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "latchStatus",
"newValue": "boo",
"prevValue": "hiss",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(no_value_filter) == 1
assert len(single_from_value_filter) == 0
assert len(multiple_from_value_filters) == 0
assert len(from_and_to_value_filters) == 0
assert len(different_value) == 0
clear_events()
# Test that a single_from_value_filter is triggered
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "latchStatus",
"newValue": "boo",
"prevValue": "ajar",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(no_value_filter) == 1
assert len(single_from_value_filter) == 1
assert len(multiple_from_value_filters) == 0
assert len(from_and_to_value_filters) == 0
assert len(different_value) == 0
clear_events()
# Test that multiple_from_value_filters are triggered
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "latchStatus",
"newValue": "boo",
"prevValue": "closed",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(no_value_filter) == 1
assert len(single_from_value_filter) == 0
assert len(multiple_from_value_filters) == 1
assert len(from_and_to_value_filters) == 0
assert len(different_value) == 0
clear_events()
# Test that from_and_to_value_filters is triggered
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "latchStatus",
"newValue": "opened",
"prevValue": "closed",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(no_value_filter) == 1
assert len(single_from_value_filter) == 0
assert len(multiple_from_value_filters) == 1
assert len(from_and_to_value_filters) == 1
assert len(different_value) == 0
clear_events()
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "boltStatus",
"newValue": "boo",
"prevValue": "hiss",
"propertyName": "boltStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(no_value_filter) == 0
assert len(single_from_value_filter) == 0
assert len(multiple_from_value_filters) == 0
assert len(from_and_to_value_filters) == 0
assert len(different_value) == 1
clear_events()
with patch("homeassistant.config.load_yaml", return_value={}):
await hass.services.async_call(automation.DOMAIN, SERVICE_RELOAD, blocking=True)
async def test_zwave_js_value_updated_bypass_dynamic_validation(
hass: HomeAssistant, client, lock_schlage_be469, integration
) -> None:
"""Test zwave_js.value_updated trigger when bypassing dynamic validation."""
trigger_type = f"{DOMAIN}.value_updated"
node: Node = lock_schlage_be469
no_value_filter = async_capture_events(hass, "no_value_filter")
with patch(
"homeassistant.components.zwave_js.triggers.value_updated.async_bypass_dynamic_config_validation",
return_value=True,
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# no value filter
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
},
"action": {
"event": "no_value_filter",
},
},
]
},
)
# Test that no value filter is triggered
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "latchStatus",
"newValue": "boo",
"prevValue": "hiss",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(no_value_filter) == 1
async def test_zwave_js_value_updated_bypass_dynamic_validation_no_nodes(
hass: HomeAssistant, client, lock_schlage_be469, integration
) -> None:
"""Test value_updated trigger when bypassing dynamic validation with no nodes."""
trigger_type = f"{DOMAIN}.value_updated"
node: Node = lock_schlage_be469
no_value_filter = async_capture_events(hass, "no_value_filter")
with patch(
"homeassistant.components.zwave_js.triggers.value_updated.async_bypass_dynamic_config_validation",
return_value=True,
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# no value filter
{
"trigger": {
"platform": trigger_type,
"entity_id": "sensor.test",
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
},
"action": {
"event": "no_value_filter",
},
},
]
},
)
# Test that no value filter is NOT triggered because automation failed setup
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "latchStatus",
"newValue": "boo",
"prevValue": "hiss",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(no_value_filter) == 0
async def test_zwave_js_value_updated_bypass_dynamic_validation_no_driver(
hass: HomeAssistant, client, lock_schlage_be469, integration
) -> None:
"""Test zwave_js.value_updated trigger without driver."""
trigger_type = f"{DOMAIN}.value_updated"
node: Node = lock_schlage_be469
driver = client.driver
client.driver = None
no_value_filter = async_capture_events(hass, "no_value_filter")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# no value filter
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
},
"action": {
"event": "no_value_filter",
},
},
]
},
)
await hass.async_block_till_done()
client.driver = driver
# Test that no value filter is NOT triggered because automation failed setup
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "latchStatus",
"newValue": "boo",
"prevValue": "hiss",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(no_value_filter) == 0
async def test_zwave_js_event(
hass: HomeAssistant, client, lock_schlage_be469, integration
) -> None:
"""Test for zwave_js.event automation trigger."""
trigger_type = f"{DOMAIN}.event"
node: Node = lock_schlage_be469
dev_reg = async_get_dev_reg(hass)
device = dev_reg.async_get_device(
{get_device_id(client.driver, lock_schlage_be469)}
)
assert device
node_no_event_data_filter = async_capture_events(hass, "node_no_event_data_filter")
node_event_data_filter = async_capture_events(hass, "node_event_data_filter")
controller_no_event_data_filter = async_capture_events(
hass, "controller_no_event_data_filter"
)
controller_event_data_filter = async_capture_events(
hass, "controller_event_data_filter"
)
driver_no_event_data_filter = async_capture_events(
hass, "driver_no_event_data_filter"
)
driver_event_data_filter = async_capture_events(hass, "driver_event_data_filter")
node_event_data_no_partial_dict_match_filter = async_capture_events(
hass, "node_event_data_no_partial_dict_match_filter"
)
node_event_data_partial_dict_match_filter = async_capture_events(
hass, "node_event_data_partial_dict_match_filter"
)
def clear_events():
"""Clear all events in the event list."""
node_no_event_data_filter.clear()
node_event_data_filter.clear()
controller_no_event_data_filter.clear()
controller_event_data_filter.clear()
driver_no_event_data_filter.clear()
driver_event_data_filter.clear()
node_event_data_no_partial_dict_match_filter.clear()
node_event_data_partial_dict_match_filter.clear()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# node filter: no event data
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"event_source": "node",
"event": "interview stage completed",
},
"action": {
"event": "node_no_event_data_filter",
},
},
# node filter: event data
{
"trigger": {
"platform": trigger_type,
"device_id": device.id,
"event_source": "node",
"event": "interview stage completed",
"event_data": {"stageName": "ProtocolInfo"},
},
"action": {
"event": "node_event_data_filter",
},
},
# controller filter: no event data
{
"trigger": {
"platform": trigger_type,
"config_entry_id": integration.entry_id,
"event_source": "controller",
"event": "inclusion started",
},
"action": {
"event": "controller_no_event_data_filter",
},
},
# controller filter: event data
{
"trigger": {
"platform": trigger_type,
"config_entry_id": integration.entry_id,
"event_source": "controller",
"event": "inclusion started",
"event_data": {"secure": True},
},
"action": {
"event": "controller_event_data_filter",
},
},
# driver filter: no event data
{
"trigger": {
"platform": trigger_type,
"config_entry_id": integration.entry_id,
"event_source": "driver",
"event": "logging",
},
"action": {
"event": "driver_no_event_data_filter",
},
},
# driver filter: event data
{
"trigger": {
"platform": trigger_type,
"config_entry_id": integration.entry_id,
"event_source": "driver",
"event": "logging",
"event_data": {"message": "test"},
},
"action": {
"event": "driver_event_data_filter",
},
},
# node filter: event data, no partial dict match
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"event_source": "node",
"event": "value updated",
"event_data": {"args": {"commandClassName": "Door Lock"}},
},
"action": {
"event": "node_event_data_no_partial_dict_match_filter",
},
},
# node filter: event data, partial dict match
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"event_source": "node",
"event": "value updated",
"event_data": {"args": {"commandClassName": "Door Lock"}},
"partial_dict_match": True,
},
"action": {
"event": "node_event_data_partial_dict_match_filter",
},
},
]
},
)
# Test that `node no event data filter` is triggered and `node event data filter` is not
event = Event(
type="interview stage completed",
data={
"source": "node",
"event": "interview stage completed",
"stageName": "NodeInfo",
"nodeId": node.node_id,
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 1
assert len(node_event_data_filter) == 0
assert len(controller_no_event_data_filter) == 0
assert len(controller_event_data_filter) == 0
assert len(driver_no_event_data_filter) == 0
assert len(driver_event_data_filter) == 0
assert len(node_event_data_no_partial_dict_match_filter) == 0
assert len(node_event_data_partial_dict_match_filter) == 0
clear_events()
# Test that `node no event data filter` and `node event data filter` are triggered
event = Event(
type="interview stage completed",
data={
"source": "node",
"event": "interview stage completed",
"stageName": "ProtocolInfo",
"nodeId": node.node_id,
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 1
assert len(node_event_data_filter) == 1
assert len(controller_no_event_data_filter) == 0
assert len(controller_event_data_filter) == 0
assert len(driver_no_event_data_filter) == 0
assert len(driver_event_data_filter) == 0
assert len(node_event_data_no_partial_dict_match_filter) == 0
assert len(node_event_data_partial_dict_match_filter) == 0
clear_events()
# Test that `controller no event data filter` is triggered and `controller event data filter` is not
event = Event(
type="inclusion started",
data={
"source": "controller",
"event": "inclusion started",
"secure": False,
},
)
client.driver.controller.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 0
assert len(node_event_data_filter) == 0
assert len(controller_no_event_data_filter) == 1
assert len(controller_event_data_filter) == 0
assert len(driver_no_event_data_filter) == 0
assert len(driver_event_data_filter) == 0
assert len(node_event_data_no_partial_dict_match_filter) == 0
assert len(node_event_data_partial_dict_match_filter) == 0
clear_events()
# Test that both `controller no event data filter` and `controller event data filter` are triggered
event = Event(
type="inclusion started",
data={
"source": "controller",
"event": "inclusion started",
"secure": True,
},
)
client.driver.controller.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 0
assert len(node_event_data_filter) == 0
assert len(controller_no_event_data_filter) == 1
assert len(controller_event_data_filter) == 1
assert len(driver_no_event_data_filter) == 0
assert len(driver_event_data_filter) == 0
assert len(node_event_data_no_partial_dict_match_filter) == 0
assert len(node_event_data_partial_dict_match_filter) == 0
clear_events()
# Test that `driver no event data filter` is triggered and `driver event data filter` is not
event = Event(
type="logging",
data={
"source": "driver",
"event": "logging",
"message": "no test",
"formattedMessage": "test",
"direction": ">",
"level": "debug",
"primaryTags": "tag",
"secondaryTags": "tag2",
"secondaryTagPadding": 0,
"multiline": False,
"timestamp": "time",
"label": "label",
},
)
client.driver.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 0
assert len(node_event_data_filter) == 0
assert len(controller_no_event_data_filter) == 0
assert len(controller_event_data_filter) == 0
assert len(driver_no_event_data_filter) == 1
assert len(driver_event_data_filter) == 0
assert len(node_event_data_no_partial_dict_match_filter) == 0
assert len(node_event_data_partial_dict_match_filter) == 0
clear_events()
# Test that both `driver no event data filter` and `driver event data filter` are triggered
event = Event(
type="logging",
data={
"source": "driver",
"event": "logging",
"message": "test",
"formattedMessage": "test",
"direction": ">",
"level": "debug",
"primaryTags": "tag",
"secondaryTags": "tag2",
"secondaryTagPadding": 0,
"multiline": False,
"timestamp": "time",
"label": "label",
},
)
client.driver.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 0
assert len(node_event_data_filter) == 0
assert len(controller_no_event_data_filter) == 0
assert len(controller_event_data_filter) == 0
assert len(driver_no_event_data_filter) == 1
assert len(driver_event_data_filter) == 1
assert len(node_event_data_no_partial_dict_match_filter) == 0
assert len(node_event_data_partial_dict_match_filter) == 0
clear_events()
# Test that only `node with event data and partial match dict filter` is triggered
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 49,
"endpoint": 0,
"property": "latchStatus",
"newValue": "closed",
"prevValue": "open",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 0
assert len(node_event_data_filter) == 0
assert len(controller_no_event_data_filter) == 0
assert len(controller_event_data_filter) == 0
assert len(driver_no_event_data_filter) == 0
assert len(driver_event_data_filter) == 0
assert len(node_event_data_no_partial_dict_match_filter) == 0
assert len(node_event_data_partial_dict_match_filter) == 1
clear_events()
# Test that `node with event data and partial match dict filter` is not triggered
# when partial dict doesn't match
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "fake command class name",
"commandClass": 49,
"endpoint": 0,
"property": "latchStatus",
"newValue": "closed",
"prevValue": "open",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 0
assert len(node_event_data_filter) == 0
assert len(controller_no_event_data_filter) == 0
assert len(controller_event_data_filter) == 0
assert len(driver_no_event_data_filter) == 0
assert len(driver_event_data_filter) == 0
assert len(node_event_data_no_partial_dict_match_filter) == 0
assert len(node_event_data_partial_dict_match_filter) == 0
clear_events()
with patch("homeassistant.config.load_yaml", return_value={}):
await hass.services.async_call(automation.DOMAIN, SERVICE_RELOAD, blocking=True)
async def test_zwave_js_event_bypass_dynamic_validation(
hass: HomeAssistant, client, lock_schlage_be469, integration
) -> None:
"""Test zwave_js.event trigger when bypassing dynamic config validation."""
trigger_type = f"{DOMAIN}.event"
node: Node = lock_schlage_be469
node_no_event_data_filter = async_capture_events(hass, "node_no_event_data_filter")
with patch(
"homeassistant.components.zwave_js.triggers.event.async_bypass_dynamic_config_validation",
return_value=True,
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# node filter: no event data
{
"trigger": {
"platform": trigger_type,
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"event_source": "node",
"event": "interview stage completed",
},
"action": {
"event": "node_no_event_data_filter",
},
},
]
},
)
# Test that `node no event data filter` is triggered and `node event data filter` is not
event = Event(
type="interview stage completed",
data={
"source": "node",
"event": "interview stage completed",
"stageName": "NodeInfo",
"nodeId": node.node_id,
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 1
async def test_zwave_js_event_bypass_dynamic_validation_no_nodes(
hass: HomeAssistant, client, lock_schlage_be469, integration
) -> None:
"""Test event trigger when bypassing dynamic validation with no nodes."""
trigger_type = f"{DOMAIN}.event"
node: Node = lock_schlage_be469
node_no_event_data_filter = async_capture_events(hass, "node_no_event_data_filter")
with patch(
"homeassistant.components.zwave_js.triggers.event.async_bypass_dynamic_config_validation",
return_value=True,
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# node filter: no event data
{
"trigger": {
"platform": trigger_type,
"entity_id": "sensor.fake",
"event_source": "node",
"event": "interview stage completed",
},
"action": {
"event": "node_no_event_data_filter",
},
},
]
},
)
# Test that `node no event data filter` is NOT triggered because automation failed
# setup
event = Event(
type="interview stage completed",
data={
"source": "node",
"event": "interview stage completed",
"stageName": "NodeInfo",
"nodeId": node.node_id,
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(node_no_event_data_filter) == 0
async def test_zwave_js_event_invalid_config_entry_id(
hass: HomeAssistant, client, integration, caplog: pytest.LogCaptureFixture
) -> None:
"""Test zwave_js.event automation trigger fails when config entry ID is invalid."""
trigger_type = f"{DOMAIN}.event"
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": trigger_type,
"config_entry_id": "not_real_entry_id",
"event_source": "controller",
"event": "inclusion started",
},
"action": {
"event": "node_no_event_data_filter",
},
}
]
},
)
assert "Config entry 'not_real_entry_id' not found" in caplog.text
caplog.clear()
async def test_async_validate_trigger_config(hass: HomeAssistant) -> None:
"""Test async_validate_trigger_config."""
mock_platform = AsyncMock()
with patch(
"homeassistant.components.zwave_js.trigger._get_trigger_platform",
return_value=mock_platform,
):
mock_platform.async_validate_trigger_config.return_value = {}
await async_validate_trigger_config(hass, {})
mock_platform.async_validate_trigger_config.assert_awaited()
async def test_invalid_trigger_configs(hass: HomeAssistant) -> None:
"""Test invalid trigger configs."""
with pytest.raises(vol.Invalid):
await async_validate_trigger_config(
hass,
{
"platform": f"{DOMAIN}.event",
"entity_id": "fake.entity",
"event_source": "node",
"event": "value updated",
},
)
with pytest.raises(vol.Invalid):
await async_validate_trigger_config(
hass,
{
"platform": f"{DOMAIN}.value_updated",
"entity_id": "fake.entity",
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
},
)
async def test_zwave_js_trigger_config_entry_unloaded(
hass: HomeAssistant, client, lock_schlage_be469, integration
) -> None:
"""Test zwave_js triggers bypass dynamic validation when needed."""
dev_reg = async_get_dev_reg(hass)
device = dev_reg.async_get_device(
{get_device_id(client.driver, lock_schlage_be469)}
)
assert device
# Test bypass check is False
assert not async_bypass_dynamic_config_validation(
hass,
{
"platform": f"{DOMAIN}.value_updated",
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
},
)
await hass.config_entries.async_unload(integration.entry_id)
# Test full validation for both events
assert await async_validate_trigger_config(
hass,
{
"platform": f"{DOMAIN}.value_updated",
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
},
)
assert await async_validate_trigger_config(
hass,
{
"platform": f"{DOMAIN}.event",
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"event_source": "node",
"event": "interview stage completed",
},
)
# Test bypass check
assert async_bypass_dynamic_config_validation(
hass,
{
"platform": f"{DOMAIN}.value_updated",
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
},
)
assert async_bypass_dynamic_config_validation(
hass,
{
"platform": f"{DOMAIN}.value_updated",
"device_id": device.id,
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
"from": "ajar",
},
)
assert async_bypass_dynamic_config_validation(
hass,
{
"platform": f"{DOMAIN}.event",
"entity_id": SCHLAGE_BE469_LOCK_ENTITY,
"event_source": "node",
"event": "interview stage completed",
},
)
assert async_bypass_dynamic_config_validation(
hass,
{
"platform": f"{DOMAIN}.event",
"device_id": device.id,
"event_source": "node",
"event": "interview stage completed",
"event_data": {"stageName": "ProtocolInfo"},
},
)
assert async_bypass_dynamic_config_validation(
hass,
{
"platform": f"{DOMAIN}.event",
"config_entry_id": integration.entry_id,
"event_source": "controller",
"event": "nvm convert progress",
},
)
|
[
"noreply@github.com"
] |
bdraco.noreply@github.com
|
fd32d0105716ddbb54641b5b71847f1a96c4234d
|
5077bdb6fe6ff5d57033ea860ca0b3fc4b0e9d6d
|
/textutils/urls.py
|
c5489c0fb27af07db9850eeafeb427701fea1391
|
[] |
no_license
|
mahimajain15/DjangoTutorial
|
c15a1fcf89c03a029c24035edd3a8acec68938b9
|
289624975fb604f1ecd0cbaf39fbdd98ad976dc8
|
refs/heads/master
| 2023-04-07T04:17:20.649577
| 2021-04-20T08:00:14
| 2021-04-20T08:00:14
| 358,892,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
"""textutils URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index, name='index'),
path('analyze', views.analyze, name='analyze'),
# path('removepunc/', views.removePunc, name='remP'),
# path('capF/', views.capFirst, name='capF'),
# path('newLR/', views.newLineRemove, name='newLR'),
# path('spcR/', views.spaceRemove, name='spcR'),
# path('chrC/', views.charCount, name='chrC'),
# path('about', views.about, name='about'),
# path('read', views.readText, name='read'),
# path('navigator',views.personalNavigator, name='navigator'),
]
|
[
"mhuj71@gmail.com"
] |
mhuj71@gmail.com
|
7c771cb3bdbe013b24c7b0740b792d74d8cb0291
|
df8669aa05188a1fef96e62d338ccbc53cbee7ef
|
/creator_studio/browser.py
|
4bf4d20ff7794336fc045bad959c2dfae3c61c1f
|
[
"MIT"
] |
permissive
|
ttcakan/creator-studio-cli
|
c09671a820900159da7d8c267c0ff30614b76da9
|
7f41936f7fd4c554fb6980ceb8a71892161be97e
|
refs/heads/master
| 2023-04-08T23:17:41.150797
| 2021-04-25T16:37:52
| 2021-04-25T16:37:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
# Based on https://github.com/timgrossmann/InstaPy/blob/c3ad0c869bcaa402a738804b9a20f55a82c12edb/instapy/browser.py
import os
import zipfile
from os.path import sep
from selenium import webdriver
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
def create_firefox_extension():
ext_path = os.path.abspath(os.path.dirname(__file__) + sep + "firefox_extension")
    # save into assets folder
zip_file = os.path.abspath(os.path.dirname(__file__) + sep) +"extension.xpi"
files = ["manifest.json", "content.js", "arrive.js"]
with zipfile.ZipFile(zip_file, "w", zipfile.ZIP_DEFLATED, False) as zipf:
for file in files:
zipf.write(ext_path + sep + file, file)
return zip_file
def get_browser(browser_profile_path=None):
"""Start the driver and retur it."""
if browser_profile_path is not None:
firefox_profile = webdriver.FirefoxProfile(browser_profile_path)
else:
firefox_profile = webdriver.FirefoxProfile()
browser = webdriver.Firefox(firefox_profile)
browser.implicitly_wait(10) # seconds
    # add extensions to hide selenium
browser.install_addon(create_firefox_extension(), temporary=True)
return browser
def explicit_wait_visibility_of_element_located(browser, xpath, timeout=35):
"""Explicitly wait until visibility on element."""
locator = (By.XPATH, xpath)
condition = expected_conditions.visibility_of_element_located(locator)
try:
wait = WebDriverWait(browser, timeout)
result = wait.until(condition)
except TimeoutException:
print("Timeout Exception in explicit wait")
return False
return result
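# Usage sketch (illustrative, not part of the original module); the URL and
# XPath below are made-up placeholders.
if __name__ == "__main__":
    browser = get_browser()
    browser.get("https://example.com")
    heading = explicit_wait_visibility_of_element_located(browser, "//h1")
    if heading:
        print(heading.text)
    browser.quit()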
|
[
"arielbmx@gmail.com"
] |
arielbmx@gmail.com
|
baf7d0a020964d53a1bf7820b75b47ea6d2a4374
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2192/60749/255887.py
|
25886eaee6c3c12836597ac57bcd59d71f4b0f4e
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
n=int(input())
num_array=[]
for h in range(n):
num_array.append(int(input()))
def create_array(n):
temp=n
res=[]
res.append(n)
while n>0:
n=n-5
res.append(n)
while not n==temp:
n=n+5
res.append(n)
strresult=str(res[0])
for n in range(1,len(res)):
strresult=strresult+" "+str(res[n])
strresult+=" "
return strresult+" \n"
for _ in range(0,len(num_array)):
print(create_array(num_array[_]))
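# Example (illustrative): an input value of 5 yields the line "5 0 5"
# (counting down to 0 in steps of 5, then back up to the start value).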
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
aa72db0ec08bac80fbbb175811636cc6ec36d50a
|
02a77a02712e3238b9e50d195071c7f25c59fe45
|
/utils/dataset/__init__.py
|
b3f543cceb2bde2881fe9ca264233e81b0339cd0
|
[] |
no_license
|
grayondream/meachinelearning-note
|
21baecc830dafd58d604ba8397c7bfb390cdc9b7
|
76721ac4d7fcbd988322abe3acb00335c6379896
|
refs/heads/master
| 2021-01-04T19:10:37.712166
| 2020-02-29T05:45:51
| 2020-02-29T05:45:51
| 240,723,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
from utils.dataset import mnist_txt
|
[
"grayondream@outlook.com"
] |
grayondream@outlook.com
|
4f5227625d1d2d45ae9fd7172c7c18d96ab64079
|
16518c32f44a3cc32014e77876a4f224e6858239
|
/bullet.py
|
0a0ddd5c98be3235fc6ffde43d462f8ec117bdc5
|
[] |
no_license
|
NOVIAS/python_game
|
d0b40e4844ea94601c3a82e2992c4166a19edc4e
|
02a227a93b64c9f87ded7398f6d700d9cd006f42
|
refs/heads/master
| 2023-08-19T02:25:26.787254
| 2021-10-10T17:12:43
| 2021-10-10T17:12:43
| 415,644,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
# -*- coding: utf-8 -*-
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
"""一个对飞船发射的子弹进行管理的类"""
def __init__(self, ai_settings, screen, ship):
"""在飞船所处的位置创建一个子弹对象"""
# 在Python2.7中 super(Bullet, self).__init__()
super().__init__()
self.screen = screen
# 在(0,0)处使用 pygame 创建一个表示子弹的矩形,再设置正确的位置
self.rect = pygame.Rect(0, 0, ai_settings.bullet_width, ai_settings.bullet_height)
# 用飞船的位置定位子弹的位置
self.rect.centerx = ship.rect.centerx
self.rect.top = ship.rect.top
# 用小数表示子弹的位置
self.y = float(self.rect.y)
self.color = ai_settings.bullet_color
self.speed_factor = ai_settings.bullet_speed_factor
def update(self):
"""向上移动子弹"""
# 让子弹按照一定的速度向上发射
self.y -= self.speed_factor
# 绘制子弹在每次运动时的位置信息
self.rect.y = self.y
def draw_bullet(self):
"""在屏幕上绘制子弹"""
pygame.draw.rect(self.screen, self.color, self.rect)
|
[
"275411032@qq.com"
] |
275411032@qq.com
|
f4b94f3e9e0062f993da8ec1d1f47416023b7a9d
|
3bda8ddc752bc853173d34c2f8f6dbef114cb496
|
/client.py
|
f752435cb45b2582c6b752b4e8b93d1527570fa0
|
[] |
no_license
|
DiegoBrando/ParkingTickets
|
fc026d6d48316a32e22ca621bf563d24341adbb0
|
4fb7ebe2dc36fd253e6083bbda15c2ebf7160268
|
refs/heads/master
| 2020-06-27T10:08:55.392912
| 2019-08-07T17:07:53
| 2019-08-07T17:07:53
| 199,923,240
| 0
| 0
| null | 2019-08-07T17:06:51
| 2019-07-31T20:19:07
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
import http.client, subprocess
c = http.client.HTTPConnection('localhost', 8080)
c.request('POST', '/return', '{}')
doc = c.getresponse().read()
print (doc)
|
[
"noreply@github.com"
] |
DiegoBrando.noreply@github.com
|
196fae2abddb6f536ddcc1d1432dead22b6d7268
|
e2dbf4f4101193ce7a3c02d9c02c5518776e73b1
|
/python_scripts/inputs.py
|
e8a445e621912541cf4df5b12d1d5b0bd7a67934
|
[] |
no_license
|
chaitnyap/LearnPython
|
267319cc5f8e9279bf12d653251bb6c42cb126f5
|
d5a56c13fc736e031a59f304f81c7d52b46f0a54
|
refs/heads/master
| 2021-01-22T01:28:02.466679
| 2016-03-01T11:51:30
| 2016-03-01T11:51:30
| 40,639,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
name = input("What's your name? ")
print("Nice to meet you " + name + "!")
age = input("Your age? ")
print("So, you are already " + age + " years old, " + name + "!")
|
[
"chaitanya.padmanabha@gmail.com"
] |
chaitanya.padmanabha@gmail.com
|
452a9dd951bab0c8c5ab851f0aa3f710bc70ccba
|
01df510517e5afad80b788c36203500854adadf9
|
/post_rec/retrievers/utils.py
|
4386efa24a2fbe64b0f1171d51609abd01110a93
|
[
"MIT"
] |
permissive
|
AnonymousAuthor2013/PostRec
|
903511f16afa0db34ad28e5cd104b52c4f61a656
|
a1461f716d177e28b96ca29d1398f96b5717c1e1
|
refs/heads/master
| 2020-08-16T09:16:54.510718
| 2019-10-16T07:47:39
| 2019-10-16T07:47:39
| 215,484,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,238
|
py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Various retriever utilities."""
import regex
import unicodedata
import numpy as np
import scipy.sparse as sp
from sklearn.utils import murmurhash3_32
def getTF_IDF_Data(data_source,ngram=2,hash_size=16777216,tokenizer_name='bert'):
dataName='tf_idf_hash/{}-docs-tfidf-ngram={}-hash={}-tokenizer={}.npz'.format(data_source,ngram,hash_size,tokenizer_name)
return dataName
# ------------------------------------------------------------------------------
# Sparse matrix saving/loading helpers.
# ------------------------------------------------------------------------------
def save_sparse_csr(filename, matrix, metadata=None):
#print("saving shape",np.shape(matrix))
data = {
'data': matrix.data,
'indices': matrix.indices,
'indptr': matrix.indptr,
'shape': matrix.shape,
'metadata': metadata,
}
np.savez(filename, **data)
def load_sparse_csr(filename):
loader = np.load(filename)
matrix = sp.csr_matrix((loader['data'], loader['indices'],
loader['indptr']), shape=loader['shape'])
return matrix, loader['metadata'].item(0) if 'metadata' in loader else None
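# Usage sketch (illustrative, not part of the original module): round-trip a
# sparse matrix to disk with the two helpers above.
#   mat = sp.csr_matrix(np.eye(3))
#   save_sparse_csr('demo.npz', mat, metadata={'ngram': 2})
#   mat2, meta = load_sparse_csr('demo.npz')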
# ------------------------------------------------------------------------------
# Token hashing.
# ------------------------------------------------------------------------------
def hash(token, num_buckets):
"""Unsigned 32 bit murmurhash for feature hashing."""
return murmurhash3_32(token, positive=True) % num_buckets
# ------------------------------------------------------------------------------
# Text cleaning.
# ------------------------------------------------------------------------------
STOPWORDS = {
'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through',
'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor',
'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can',
'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've',
'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven',
'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren',
'won', 'wouldn', "'ll", "'re", "'ve", "n't", "'s", "'d", "'m", "''", "``"
}
def normalize(text):
"""Resolve different type of unicode encodings."""
return unicodedata.normalize('NFD', text)
def filter_word(text):
"""Take out english stopwords, punctuation, and compound endings."""
text = normalize(text)
if regex.match(r'^\p{P}+$', text):
return True
if text.lower() in STOPWORDS:
return True
return False
def filter_ngram(gram, mode='any'):
"""Decide whether to keep or discard an n-gram.
Args:
gram: list of tokens (length N)
mode: Option to throw out ngram if
'any': any single token passes filter_word
'all': all tokens pass filter_word
'ends': book-ended by filterable tokens
"""
filtered = [filter_word(w) for w in gram]
if mode == 'any':
return any(filtered)
elif mode == 'all':
return all(filtered)
elif mode == 'ends':
return filtered[0] or filtered[-1]
else:
raise ValueError('Invalid mode: %s' % mode)
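# Usage sketch (illustrative, not part of the original module): filter
# candidate n-grams before hashing them into feature buckets.
if __name__ == '__main__':
    ngrams = [['quick', 'brown'], ['of', 'the']]
    kept = [g for g in ngrams if not filter_ngram(g, mode='any')]
    print(kept)                           # [['quick', 'brown']]
    print(hash('quick brown', 16777216))  # stable bucket index for feature hashing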
|
[
"bird@local.com"
] |
bird@local.com
|
6e33cdf39320e6c2df41154a8f321dce893e11fa
|
29d7b72e11e5e8050bf743784a3634ea0f080a32
|
/shop_app/migrations/0014_auto_20210205_0928.py
|
1f442cdfaacd4de60a190dc91e1aec548a6a0797
|
[] |
no_license
|
vincenttpham/dekage
|
8c3633235afee6c207c33d7108b58f3f555d855d
|
a5e380eb98a983de7317558ecca2aa1d906002da
|
refs/heads/main
| 2023-04-21T12:19:03.898801
| 2021-05-09T00:36:56
| 2021-05-09T00:36:56
| 341,022,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,953
|
py
|
# Generated by Django 2.2 on 2021-02-05 09:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop_app', '0013_auto_20210202_0808'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='address1',
),
migrations.RemoveField(
model_name='order',
name='address2',
),
migrations.RemoveField(
model_name='order',
name='card_number',
),
migrations.RemoveField(
model_name='order',
name='city',
),
migrations.RemoveField(
model_name='order',
name='country',
),
migrations.RemoveField(
model_name='order',
name='cvv',
),
migrations.RemoveField(
model_name='order',
name='expiration',
),
migrations.RemoveField(
model_name='order',
name='name_on_card',
),
migrations.RemoveField(
model_name='order',
name='state',
),
migrations.RemoveField(
model_name='order',
name='zipcode',
),
migrations.RemoveField(
model_name='product',
name='image',
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('img', models.ImageField(upload_to='products/')),
('default', models.BooleanField(default=False)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image', to='shop_app.Product')),
],
),
]
|
[
"crystalpham@Crystals-MacBook-Pro.local"
] |
crystalpham@Crystals-MacBook-Pro.local
|
77acd000454de9ab3b5c63d587d238b9c83ccf21
|
8c86ff06a497951d3dce7013d788d6c66d81ab95
|
/django_bot/wsgi.py
|
f41944068cdccea5b124907c429a0042e0778c41
|
[] |
no_license
|
JbFiras/text_to_speech_django
|
2748277cf970a5b064b838c44bd5778882e2e750
|
5f9bc481600f0524571fad47247c530e96605157
|
refs/heads/master
| 2023-06-03T06:26:27.405605
| 2021-06-23T17:45:59
| 2021-06-23T17:45:59
| 379,662,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
WSGI config for django_bot project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_bot.settings')
application = get_wsgi_application()
|
[
"contact@firas-jebari.tn"
] |
contact@firas-jebari.tn
|
31df903c93b46a64456a045d42597dda7821cd78
|
477c5151e660c48bfe3ccb39d2b3db4f597fece0
|
/src/audio_all_methods.py
|
8e706873d43453a4e731ea3797d63424bcfc3a21
|
[] |
no_license
|
pcatrou/microtonal_analysis
|
f114c2d1726d04720ac07270c7ba1975955f218a
|
a49692f87c4f50fd39402b9d88d731a677c5a28d
|
refs/heads/master
| 2023-04-05T23:59:36.787254
| 2021-01-15T21:41:34
| 2021-01-15T21:41:34
| 263,392,964
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,361
|
py
|
import numpy as np
import Consts
def getEnvelope(ampl,timeCoeff):
"""
    Get the amplitude envelope of audio data; the time resolution is reduced by a factor of timeCoeff
Parameters:
ampl,timeCoeff
Returns:
filteredEnvelope
"""
absAmpl = abs(ampl)
envelope = np.array([])
#tempEnvelope = np.array([])
for i in range (timeCoeff,len(absAmpl),timeCoeff):
maximum = 0
for j in range(timeCoeff):
maximum = max(absAmpl[i-j],maximum)
envelope = np.append(envelope,[maximum])
    envelope = np.append(envelope,[0]) # last measure added to fit with other data (otherwise the envelope would be one element short)
return envelope
def getfilteredEnvelope (envelope,intensityThreshold):
"""
filter the amplitude envelope to the specified threshold
Parameters:
envelope,intensityThreshold
Returns:
filteredEnvelope
"""
filteredEnvelope = np.array([])
for i in range(len(envelope)):
if envelope[i]>intensityThreshold:
filteredEnvelope = np.append(filteredEnvelope, envelope[i])
else:
filteredEnvelope = np.append(filteredEnvelope, None)
return filteredEnvelope
# note : faster with np.append than x[i]
def getMaxIndex (dbData,filteredEnvelope,timeCoef,lowFilter,highFilter):
"""
    Generate the table with the index of the maximum amplitude for each time step
    of the dB data of the STFT-transformed sound signal.
    A filtered envelope is used to avoid noisy data;
    timeCoeff is the time step of the envelope.
    The dbData is also filtered out when its maximum at a moment t is below a threshold value
Parameters:
dbData,filteredEnvelope,timeCoef,lowFilter
Returns:
maxIndex
"""
maxIndex = np.array([])
#dbDataToFilter = dbData # to not alter dbData
rev_data = filterHighLowFreq(dbData,lowFilter,highFilter).transpose()
#rev_data = dbData.transpose()
for i in range(len(dbData[1])-timeCoef-1):
        if (filteredEnvelope[i*timeCoef] is None or np.max(rev_data[i]) < Consts.THRESHOLD_VALUE_FOR_FILTERING):  # TODO: move this threshold into a variable?
maxIndex =np.append(maxIndex,None)
else:
maxIndex =np.append(maxIndex, int(np.argmax(rev_data[i])))
maxIndex = np.append(maxIndex,[None,None])
return maxIndex
def getPitch(dbData,filteredEnvelope,timeCoef,frequencies,lowFilter, highFilter):
"""
transform the maxIndex to frequencies values. Uses getMaxIndex
Parameters:
dbData,filteredEnvelope,timeCoef,frequencies,lowFilter
Returns:
pitchValues
"""
maxIndex = getMaxIndex (dbData,filteredEnvelope,timeCoef,lowFilter,highFilter)
pitchValues = []
for i in range(len(maxIndex)):
if (maxIndex[i] != None and maxIndex[i]>10):
pitchValues.append(frequencies[int(maxIndex[i])])
else:
pitchValues.append(None)
return pitchValues
def getAllNotesVariations(pitchLines):
noteValues = []
for i in range(len(pitchLines)):
noteValuesForOneNote = []
for j in range(len(pitchLines[i])-1):
if pitchLines[i][j] != None :
noteValuesForOneNote.append(pitchLines[i][j])
noteValues.append(noteValuesForOneNote)
return noteValues
def filterHighLowFreq (dbData,lowFilter,highFilter):
"""
    Abrupt filter of high and low frequencies by setting their values to 0.
    lowFilter sets the index below which frequencies are zeroed;
    highFilter sets the index above which frequencies are zeroed.
    freqData can be an STFT signal or its dB amplitude
    Parameters:
    freqData, lowFilter, highFilter
Returns:
freqData
"""
for i in range(lowFilter):
dbData[i] = 0
for i in range(len(dbData)-highFilter):
dbData[highFilter + i] = 0
"""for i in range(len(dbData)//2+len(dbData)//3):
dbData[len(dbData)-1-i] = 0"""
return dbData
def freqToIndex (IntegerFrequencies,desiredFrequency):
"""
    Convert the desiredFrequency input (in Hz) to the index of this frequency in the frequency table.
"""
return np.where(IntegerFrequencies >= desiredFrequency)[0][0]
def divideFreq(db,factor):
"""
    divides the frequencies of the dB amplitude of the STFT by the integer factor:
    FFT(x) = FFT_O(2x)
    heart of the harmonic product spectrum method
transposed matrix are used to put freq data on rows, to easily access to them
Parameters:
db,factor
Returns:
dividedData
"""
dividedDataTransposed = np.ones((len(db),len(db[0]))).transpose()
dbTransp = db.transpose()
for f in range(len(db)//factor):
for t in range (len(db[0])):
dividedDataTransposed [t][f] = dbTransp[t][factor*f]
dividedData = dividedDataTransposed.transpose()
return dividedData
def harmonicProductSpectrum (db,iterations, coeff):
"""
    Takes the product of the dB data with frequency-divided dB data (uses divideFreq) over `iterations` iterations.
    A correction factor coeff can be used to weight the intensity of the HPS data against the initial data
Parameters:
db,iterations, coeff
Returns:
dbHPS+coeff*db
"""
dbHPS = db
for i in range(iterations-1):
dbHPS = (divideFreq(db,i+2))*dbHPS
return dbHPS+coeff*db
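# Usage sketch (illustrative, not part of the original module). `demo_db` is
# synthetic data standing in for the dB magnitude of an STFT, shaped
# (freq_bins, time_frames).
if __name__ == '__main__':
    demo_db = np.abs(np.random.randn(64, 10))
    hps = harmonicProductSpectrum(demo_db, iterations=3, coeff=0.1)
    print(hps.shape)  # (64, 10); the per-frame argmax estimates the fundamental bin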
|
[
"pierre.catrou@soprasteria.com"
] |
pierre.catrou@soprasteria.com
|
0398c1d28746bd743831ad38b969e6134ea35f2b
|
1d63231cf30e8e78c8d2b8251ecd4510d51bd719
|
/models/Pooling.py
|
6e4682cd285f079ebe35c263550128d48d3127ac
|
[] |
no_license
|
yiique/NNSample
|
f2ab6b5e02f1082e57f75f6abe466aa71e76182c
|
9be59d06103fab03dca2a7883c3fc00c7389f911
|
refs/heads/master
| 2021-01-17T20:02:58.457376
| 2016-10-27T14:04:43
| 2016-10-27T14:04:43
| 68,720,082
| 0
| 0
| null | 2016-10-27T14:04:44
| 2016-09-20T14:27:37
|
Python
|
UTF-8
|
Python
| false
| false
| 544
|
py
|
__author__ = 'liushuman'
import theano
import theano.tensor as T
class Pooling(object):
def __init__(self):
self.funcs = {'max': self._max,
'mean': self._mean}
def apply(self, func_input, pooling_type='mean', axis=0):
return self.funcs[pooling_type](func_input, axis)
def _max(self, func_input, axis):
func_output = T.max(func_input, axis)
return func_output
def _mean(self, func_input, axis):
func_output = T.mean(func_input, axis)
return func_output
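# Usage sketch (illustrative, assumes a working Theano install):
if __name__ == '__main__':
    x = T.matrix('x')
    pooled = Pooling().apply(x, pooling_type='max', axis=0)
    f = theano.function([x], pooled)
    print(f([[1., 2.], [3., 4.]]))  # -> [3. 4.]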
|
[
"liushuman@liushumandeMBP.lan"
] |
liushuman@liushumandeMBP.lan
|
c3434314b45e3c67d995ecb3881a9f05295156d4
|
1c12c0fe769a6a77e163a67adcb66387e30c048a
|
/Objects and Classes/03. Catalogue.py
|
e8a1861af32e4e293430f3a50f4466362f95db82
|
[] |
no_license
|
dvasilev7/PYTHON-FUNDAMENTALS
|
cdf352a8a86476be31155cd01c34efb98cfb2107
|
207e5b395c526a9ab084f49ad54bb538517c2c3f
|
refs/heads/main
| 2023-05-30T07:26:47.516972
| 2021-06-06T23:26:04
| 2021-06-06T23:26:04
| 374,481,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
class Catalogue:
def __init__(self, name):
self.name = name
self.products = []
def add_product(self, product):
self.products.append(product)
return self.products
def get_by_letter(self, first_letter):
letter_product = [product for product in self.products if first_letter == product[0]]
return letter_product
def __repr__(self):
self.products.sort()
result = f"Items in the {self.name} catalogue:\n"
result += "\n".join(self.products)
return result
catalogue = Catalogue("Furniture")
catalogue.add_product("Sofa")
catalogue.add_product("Mirror")
catalogue.add_product("Desk")
catalogue.add_product("Chair")
catalogue.add_product("Carpet")
print(catalogue.get_by_letter("C"))
print(catalogue)
|
[
"noreply@github.com"
] |
dvasilev7.noreply@github.com
|
3186f56bf069df1eb3a2f3a919312fc39e5d8de3
|
31e24feab52e96bc3b23c8e56008b6caa7d85d14
|
/test.py
|
d0e2b0a7dc14c63aec98a9d30e22283aa04564d4
|
[
"MIT"
] |
permissive
|
rbentl/codeastro_workshop
|
d3f9871acafdf94f52ca0d293e18061b36167895
|
9eddc296870c5d83ced95cc8c5d2a37fdcf7b516
|
refs/heads/master
| 2022-11-11T05:13:02.706886
| 2020-06-26T13:18:21
| 2020-06-26T13:18:21
| 274,463,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def test_func(x,y):
'''plot x versus y
arguments:
x (int, float, or array) : x value to be plotted
        y (int, float, or array) : y value to be plotted
    returns: None (the plot is displayed)
'''
plt.plot(x,y)
plt.show()
|
[
"kkosmo@hera.astro.ucla.edu"
] |
kkosmo@hera.astro.ucla.edu
|
3b3da3282e3f27fb4816e1c1a632967c66cf953c
|
f6bb653ea93b5dc1a84902045ff3ce123e25d8e4
|
/pyJoules/energy_device/__init__.py
|
0a237375ba4cc978704518ec5cb2de7d87eeed3b
|
[
"MIT"
] |
permissive
|
danglotb/pyJoules
|
e6969295566ffe349bb38574a00b743812a19180
|
33cbce258b89d7cf4dbfc1f944c7ad3fdcd51fe9
|
refs/heads/master
| 2022-12-17T05:17:01.196809
| 2020-07-15T14:26:53
| 2020-07-15T14:26:53
| 294,377,549
| 0
| 0
| null | 2020-09-10T10:28:29
| 2020-09-10T10:28:28
| null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
# MIT License
# Copyright (c) 2019, INRIA
# Copyright (c) 2019, University of Lille
# All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .energy_domain import EnergyDomain
from .energy_device import EnergyDevice, NotConfiguredDeviceException
from .energy_device_factory import EnergyDeviceFactory
|
[
"arthur.d-azemar@inria.fr"
] |
arthur.d-azemar@inria.fr
|
d0d33c23e9d9c46de7523b766d1dbb15a15e8608
|
25b3917e9726d099e4b46457c02dd0a74a17e064
|
/views.py
|
d04f1f7cc6c89afc92b0557ea3d3a47c613dfbc1
|
[] |
no_license
|
phzhou/fbpixels
|
3fdf6401edbd1e176c5a6cd05f4cad1059d73807
|
76e6b38b0936523da72f8b602627aca9ce8ef3e9
|
refs/heads/master
| 2020-04-24T07:36:12.401420
| 2015-08-21T10:15:23
| 2015-08-21T10:15:23
| 41,147,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
import os
import json
from django.shortcuts import render
def _load_messages(language):
messages = {}
message_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'static',
'messages',
)
with open(os.path.join(message_path, "{}.json".format(language))) as f:
messages = json.load(f)
return messages
def render_pixel_docs(request, template):
return render(request, template, {'messages': _load_messages('en')})
def overview(request):
return render_pixel_docs(request, 'pixels/overview.html')
def up(request):
return render_pixel_docs(request, 'pixels/up.html')
def wca(request):
return render_pixel_docs(request, 'pixels/wca.html')
def conversion(request):
return render_pixel_docs(request, 'pixels/conversion.html')
def manager(request):
return render_pixel_docs(request, 'pixels/manager.html')
def dpa(request):
return render_pixel_docs(request, 'pixels/dpa.html')
|
[
"phzhou@fb.com"
] |
phzhou@fb.com
|
2a21f263366b6468aa7ef284fc7512d886555783
|
c1a1feb7751a70b2d6be3329db59553f8eca1d50
|
/tictactoe.py
|
0431854e8d6e53f2c6745e1a0ea48256a76dcad5
|
[] |
no_license
|
mohaned2014/tic-tac-toc-AI
|
d87f6c71cb9269eeb4f85a54354972318577088e
|
0b52c92010a2353c1f1cb297369c78dad76dafc1
|
refs/heads/main
| 2023-01-21T02:50:39.878136
| 2020-11-27T16:02:58
| 2020-11-27T16:02:58
| 316,543,466
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,776
|
py
|
"""
Tic Tac Toe Player
"""
import math
import copy
import itertools
X = "X"
O = "O"
EMPTY = None
OO = 2
def initial_state():
"""
Returns starting state of the board.
"""
return [[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY]]
def player(board):
"""
Returns player who has the next turn on a board.
"""
turn = 0
for i in range(len(board)):
for j in range(len(board[0])):
turn += (board[i][j] != EMPTY)
return X if (turn % 2 == 0) else O
def actions(board):
"""
Returns set of all possible actions (i, j) available on the board.
"""
possible_moves = []
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == EMPTY:
possible_moves.append((i, j))
return possible_moves
def result(board, action):
"""
Returns the board that results from making move (i, j) on the board.
"""
new_board = copy.deepcopy(board)
if new_board[action[0]][action[1]] == EMPTY:
new_board[action[0]][action[1]] = player(board)
else:
raise NameError("Invalid move")
return new_board
def winner(board):
"""
Returns the winner of the game, if there is one.
"""
global X
rows = [0] * 3
cols = [0] * 3
main_diagonal = [0] * 5
rev_diagonal = [0] * 5
    def update_cell(_i, _j, _val):
        # accumulate +1 for X and -1 for O along each row, column and diagonal;
        # a line sums to +3 when X completes it and -3 when O does
        rows[_i] += _val
        cols[_j] += _val
        main_diagonal[_i - _j + 2] += _val
        rev_diagonal[_i + _j] += _val
for i in range(3):
for j in range(3):
value = 1 if board[i][j] == X else -1 if board[i][j] == O else 0
update_cell(i, j, value)
for val in itertools.chain(rows, cols, main_diagonal, rev_diagonal):
if val == 3:
return X
if val == -3:
return O
return None
def terminal(board):
"""
Returns True if game is over, False otherwise.
"""
if winner(board) is not None:
return True
for i in range(3):
for j in range(3):
if board[i][j] == EMPTY:
return False
return True
def utility(board):
"""
Returns 1 if X has won the game, -1 if O has won, 0 otherwise.
"""
winner_player = winner(board)
return 0 if winner_player is None else 1 if winner_player == X else -1
def minimax(board):
"""
Returns the optimal action for the current player on the board.
"""
if terminal(board):
return None
return maximize(board)[1] if player(board) == X else minimize(board)[1]
def maximize(_board, beta=OO):
"""
Return tuple(max_score_for_x,action=> best_move_for_x)
return score,None in case of terminal board
"""
if terminal(_board):
return utility(_board), None
score = -OO
best_action = None
for action in actions(_board):
# Alpha-beta pruning
if beta <= score:
return score, best_action
new_score, act = minimize(result(_board, action), score)
if new_score > score:
score = new_score
best_action = action
return score, best_action
def minimize(_board, alpha=-OO):
"""
Return tuple(min_score_for_O,action=> best_move_for_O)
return score,None in case of terminal board
"""
if terminal(_board):
return utility(_board), None
score = OO
best_action = None
for action in actions(_board):
# Alpha-beta pruning
if alpha >= score:
return score, best_action
new_score, act = maximize(result(_board, action), score)
if new_score < score:
score = new_score
best_action = action
return score, best_action
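# Usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    board = initial_state()
    move = minimax(board)           # optimal opening move for X
    board = result(board, move)
    print(move, winner(board), terminal(board))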
|
[
"noreply@github.com"
] |
mohaned2014.noreply@github.com
|
3a613be69385dcd229fc2a155a7e9bda7986a3d5
|
781b2bb35f11d127b3d88ac98574fc7d6dfc9cd9
|
/challenges/bitwise_and.py
|
06508f1002ae93bd4cee3f51e5ff8ebab8012ee5
|
[] |
no_license
|
ritakalach/30-days-of-code
|
4cb2632a466035cade1c7a27a671118d0660778e
|
0c7b664c4b080427bc55f825d50f0db911d37e52
|
refs/heads/master
| 2022-11-08T07:19:34.799772
| 2020-06-22T23:44:45
| 2020-06-22T23:44:45
| 266,652,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
"""
Given set S={1,2,3,...,N}. Find two integers, A and B (where A<B), from set S such that the value of
A&B is the maximum possible and also less than a given integer, K. In this case, & represents the bitwise AND operator.
"""
if __name__ == '__main__':
t = int(input())
for t_itr in range(t):
nk = input().split()
n = int(nk[0])
k = int(nk[1])
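        # A & B < k means A & B is at most k-1; A = k-1 pairs with B = (k-1) | k,
        # the smallest B > A giving A & B == A. If that B exceeds n (only possible
        # when k is even), k-2 is attainable instead, e.g. with A = k-2, B = k-1.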
print(k-1 if ((k-1) | k) <= n else k-2)
|
[
"noreply@github.com"
] |
ritakalach.noreply@github.com
|
64b06549090a2328b37edbbe08647ddc6732d394
|
3954123eb61480728b6d1bf08f5a9c427dfb40f5
|
/models/research/object_detection/Object_detection_mango_video.py
|
c435737993b66fa8bafcefd9a1b35102a6b8dc37
|
[] |
no_license
|
marufm1/CSE327_SEC4_PROJECT
|
2f135c30b49b744cff5c734d1a0200de64177623
|
7059afa277be122f1cee6436e24c5ecdc0d3b265
|
refs/heads/master
| 2020-03-26T17:33:57.235141
| 2018-09-01T22:22:58
| 2018-09-01T22:22:58
| 145,168,366
| 1
| 0
| null | 2018-09-01T22:14:32
| 2018-08-17T21:32:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,697
|
py
|
######## Video Object Detection Using Tensorflow-trained Classifier #########
#importing necessary packages for video classification
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
sys.path.append("..")
#import utilities, including visualization_utils_DB for the fruits database
from utils import label_map_util
from utils import visualization_utils_DB as vis_util
# Name of the directory that contains the object detection models
MODEL_NAME = 'inference_graph'
VIDEO_NAME = 'test_mango.mp4'
CWD_PATH = os.getcwd()
#path to the frozen_inference.pb file
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph_mango.pb')
#Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','mango_labelmap.pbtxt')
#path to video file
PATH_TO_VIDEO = os.path.join(CWD_PATH,VIDEO_NAME)
NUM_CLASSES = 1
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
#this opens the video
video = cv2.VideoCapture(PATH_TO_VIDEO)
while(video.isOpened()):
ret, frame = video.read()
frame_expanded = np.expand_dims(frame, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
    #this function, imported from visualization utils, draws detection boxes and labels on each frame
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.80)
cv2.imshow('Object detector', frame)
if cv2.waitKey(1) == ord('q'):
break
video.release()
cv2.destroyAllWindows()
|
[
"41270492+samdi18@users.noreply.github.com"
] |
41270492+samdi18@users.noreply.github.com
|
1af1c0fc788966de47dd6149a7a646e205c1e8b1
|
b40941f5fde614077d921c2cefa8449f1263616d
|
/EJECUTABLEv1.py
|
101678feaca152732740147589b054feb8fd7924
|
[] |
no_license
|
santywin/dropoutv4
|
8729b329ac39b17ab9d83d97883f466d84088b00
|
7be9231cb93a824c56fb4d0ad67203be57008663
|
refs/heads/master
| 2023-07-13T10:52:10.449999
| 2021-08-30T20:54:27
| 2021-08-30T20:54:27
| 401,483,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,717
|
py
|
import downDB
import get_estudiantes
import get_calificaciones
import calcVar
import final as finalpy
#import predRF
import pandas as pd
import predict
import predictEnsemble
import stats_pred
from sqlalchemy import create_engine
#import psycopg2, psycopg2.extras
from numpy import inf
import numpy as np
import features
import os
import warnings
warnings.filterwarnings("ignore")
from sklearn.metrics import accuracy_score
from datetime import date, time, datetime
#Change to set the semester at which students are marked as graduated
SEMESTRE_GRADUADOS = 7
#file = 'estudiantes.csv'
#estudiantes = pd.read_csv(file)
#file = 'calificaciones.csv'
#calificaciones = pd.read_csv(file, encoding = "ISO-8859-1")
print("Prediciendo Dropout...\n")
#download student and grade data from the database
estudiantes,calificaciones = downDB.download()
estudiantes = estudiantes.astype({'id': 'int','codigo_carrera': 'int', 'codigo_malla': 'int', 'total_horas_carrera': 'int', 'total_asignaturas': 'int', 'minnivel': 'int', 'edad': 'int', 'quintil': 'int'})
estudiantes['fecha_nacimiento'] = pd.to_datetime(estudiantes["fecha_nacimiento"], format = '%Y-%m-%d')
estudiantes["estudia_casa"] = estudiantes.apply(lambda x: "SI" if str(x["ciudaddomicilio"]) == str(x["sede"]) else "NO", axis=1)
estudiantes.info()
calificaciones['id'] = calificaciones['id'].astype(int)
calificaciones['codigo_curriculum'] = calificaciones['codigo_curriculum'].astype(int)
calificaciones['codigo_asignatura'] = calificaciones['codigo_asignatura'].astype(int)
calificaciones['nota_final'] = calificaciones['nota_final'].astype(int)
calificaciones['numero_matricula'] = calificaciones['numero_matricula'].astype(int)
calificaciones['codigo_carrera'] = calificaciones['codigo_carrera'].astype(int)
calificaciones['total_horas_asignatura'] = calificaciones['total_horas_asignatura'].astype(int)
calificaciones['anio_curso_asignatura'] = calificaciones['anio_curso_asignatura'].astype(int)
calificaciones['semestre_en_carrera'] = calificaciones['semestre_en_carrera'].astype(int)
calificaciones['semestre_cursa'] = calificaciones['semestre_cursa'].astype(int)
calificaciones['numero_creditos'] = calificaciones['numero_creditos'].astype(int)
calificaciones.info()
calificaciones.loc[calificaciones['semestre_en_carrera']>calificaciones['semestre_cursa'],'semestre_cursa'] = calificaciones['semestre_en_carrera']
#Label the graduates
estudiantes.loc[estudiantes["minnivel"] >= SEMESTRE_GRADUADOS, "graduado"] = "S"
estudiantes.drop("minnivel",1,inplace = True)
print("Prediciendo Dropout...\n")
#estudiantes,calificaciones = downDB.download()
print("Archivos descargados de BD\n")
print("Número total de estudiantes: " + str(estudiantes.id.count()))
print("Número total de estudiantes en el historial: "+ str(calificaciones.drop_duplicates(subset = 'id').id.count()))
cambioEstudiante = get_estudiantes.cambioEst(estudiantes)
cambioHistorial = get_calificaciones.cambCal(calificaciones)
cambioHistorial = cambioHistorial.sort_values(["id","codigo_carrera","semestre_cursa"])
#Asignaturas reprobadas con 0 no se toman en cuenta
cambioHistorial = cambioHistorial[cambioHistorial['nota_final']!=0]
#Número de semestres que lleva cursando cada alumno
asigxSem = cambioHistorial.groupby(["id","codigo_carrera","semestre_cursa"],as_index = False).codigo_asignatura.count()
alumSem = asigxSem.groupby(["id","codigo_carrera"],as_index = False).count().drop("codigo_asignatura",1)
alumSem.columns = ["id","codigo_carrera","Semestres"]
#Take the first-semester students
primerSem = cambioHistorial.groupby(["id","codigo_carrera"], as_index = False).semestre_cursa.min().astype(object)
cambioHistorial1Sem = cambioHistorial.merge(primerSem, on=['id','codigo_carrera','semestre_cursa'], how='inner')
alum1Sem = alumSem[alumSem['Semestres'] == 1]
al1Sem = alumSem[alumSem['Semestres'] >= 1]
#Take the first and second semesters
cambioHistorial2 = pd.concat([cambioHistorial, cambioHistorial1Sem]).drop_duplicates(keep=False)
segSem = cambioHistorial2.groupby(["id","codigo_carrera"], as_index = False).semestre_cursa.min().astype(object)
cambioHistorial2Sem = cambioHistorial.merge(segSem, on=['id','codigo_carrera','semestre_cursa'], how='inner')
cambioHistorial12Sem = pd.concat([cambioHistorial1Sem, cambioHistorial2Sem])
alum2Sem = alumSem[alumSem['Semestres'] == 2]
al2Sem = alumSem[alumSem['Semestres'] >= 2]
#First, second and third semesters
cambioHistorial3 = pd.concat([cambioHistorial, cambioHistorial12Sem]).drop_duplicates(keep=False)
terSem = cambioHistorial3.groupby(["id","codigo_carrera"], as_index = False).semestre_cursa.min().astype(object)
cambioHistorial3Sem = cambioHistorial.merge(terSem, on=['id','codigo_carrera','semestre_cursa'], how='inner')
cambioHistorial123Sem = pd.concat([cambioHistorial12Sem, cambioHistorial3Sem])
alum3Sem = alumSem[alumSem['Semestres'] == 3]
al3Sem = alumSem[alumSem['Semestres'] >= 3]
#First, second, third and fourth semesters
cambioHistorial4 = pd.concat([cambioHistorial, cambioHistorial123Sem]).drop_duplicates(keep=False)
cuatSem = cambioHistorial4.groupby(["id","codigo_carrera"], as_index = False).semestre_cursa.min().astype(object)
cambioHistorial4Sem = cambioHistorial.merge(cuatSem, on=['id','codigo_carrera','semestre_cursa'], how='inner')
cambioHistorial1234Sem = pd.concat([cambioHistorial123Sem, cambioHistorial4Sem])
alum4Sem = alumSem[alumSem['Semestres'] == 4]
al4Sem = alumSem[alumSem['Semestres'] >= 4]
#First, second, third, fourth and fifth semesters
cambioHistorial5 = pd.concat([cambioHistorial, cambioHistorial1234Sem]).drop_duplicates(keep=False)
quinSem = cambioHistorial5.groupby(["id","codigo_carrera"], as_index = False).semestre_cursa.min().astype(object)
cambioHistorial5Sem = cambioHistorial.merge(quinSem, on=['id','codigo_carrera','semestre_cursa'], how='inner')
cambioHistorial12345Sem = pd.concat([cambioHistorial1234Sem, cambioHistorial5Sem])
alum5Sem = alumSem[alumSem['Semestres'] == 5]
al5Sem = alumSem[alumSem['Semestres'] >= 5]
#Students beyond the fifth semester
alumMas5Sem = alumSem[alumSem['Semestres'] > 5]
resultados = pd.DataFrame()
datosBD = pd.DataFrame()
carrerasSinActivos = ""
carrerasSinActivosAUC = ""
abandonoXSem = pd.DataFrame()
abandonoXSem['carrera'] = cambioEstudiante['nombre_carrera'].drop_duplicates().dropna()
abandonoXSem.set_index('carrera',inplace = True)
gradXSem = pd.DataFrame()
gradXSem['carrera'] = cambioEstudiante['nombre_carrera'].drop_duplicates().dropna()
gradXSem.set_index('carrera',inplace = True)
correlation = {}
featureSelectionRF = {}
featureSelection = {}
stats = pd.DataFrame(index='Accuracy AUC Log_loss'.split())
carreras = pd.DataFrame(estudiantes['nombre_carrera'].drop_duplicates().dropna())
try:
os.mkdir('Archivos')
except Exception as e:
print("Error 1 ", e)
pass
for carrera in carreras['nombre_carrera']:
try:
os.mkdir('Archivos/' + carrera)
except Exception as e:
print("Error 2 ", e)
pass
for i in range(6):
if i == 0:
print("Semestre 1")
cambioHistorial1Sem = cambioHistorial1Sem.copy()
cambioHistorialSemActual = cambioHistorial1Sem.copy()
alum1Sem = alum1Sem.copy()
al1Sem = al1Sem.copy()
elif i == 1:
print("Semestre 2")
cambioHistorial1Sem = cambioHistorial12Sem.copy()
cambioHistorialSemActual = cambioHistorial2Sem.copy()
alum1Sem = alum2Sem.copy()
al1Sem = al2Sem.copy()
elif i == 2:
print("Semestre 3")
cambioHistorial1Sem = cambioHistorial123Sem.copy()
cambioHistorialSemActual = cambioHistorial3Sem.copy()
alum1Sem = alum3Sem.copy()
al1Sem = al3Sem.copy()
elif i == 3:
print("Semestre 4")
cambioHistorial1Sem = cambioHistorial1234Sem.copy()
cambioHistorialSemActual = cambioHistorial4Sem.copy()
alum1Sem = alum4Sem.copy()
al1Sem = al4Sem.copy()
elif i == 4:
print("Semestre 5")
cambioHistorial1Sem = cambioHistorial12345Sem.copy()
cambioHistorialSemActual = cambioHistorial5Sem.copy()
alum1Sem = alum5Sem.copy()
al1Sem = al5Sem.copy()
elif i == 5:
print("Semestre 6")
cambioHistorial1Sem = cambioHistorial.copy()
cambioHistorialSemActual = cambioHistorial5Sem.copy()
alum1Sem = alumMas5Sem.copy()
al1Sem = al5Sem.copy()
dropout, dataMedia, credPass = calcVar.calcular(cambioHistorial,cambioEstudiante,cambioHistorial1Sem,cambioHistorialSemActual)
dropout = dropout.reset_index(level=['id', 'codigo_carrera'])
print("Variables de entrada calculadas\n")
final = finalpy.finalData(dataMedia, cambioEstudiante, credPass,dropout)
final = final.drop_duplicates(subset = "id")
print("Datos finales listos\n")
carreras = pd.DataFrame(final['nombre_carrera'])
carreras = carreras.drop_duplicates().dropna()
abandonoXSem['dropoutSem'+str(i+1)] = 0
gradXSem['gradSem'+str(i+1)] = 0
final.dropout = final.dropout.astype(str)
    #Many students have been observed to drop out, but not for academic reasons
dropoutNoAcad = final[((final['rateAprobadas']>=0.95) & (final['dropout'] == "0"))]
dropoutNoAcad = dropoutNoAcad.merge(alum1Sem, on=['id','codigo_carrera'], how='inner')
datosBD = pd.concat([datosBD, dropoutNoAcad],axis = 0)
final = final[~((final['rateAprobadas']>=0.95) & (final['dropout'] == "0"))]
    #Remove students who have already dropped out (better for the final prediction, since it predicts
    #lower dropout values, but worse for the statistics)
"""
final1 = final[final['dropout']== "0"]
final1 = final1.merge(al1Sem, on=['id','codigo_carrera'], how='inner')
final2 = final[final['dropout']!= "0"]
final = pd.concat([final1,final2],ignore_index = True).drop("Semestres",1)
"""
#final.loc[final['porcentaje_carrera']>=np.mean(final['porcentaje_carrera']*2),'dropout'] = "0"
for carrera in carreras['nombre_carrera']:
print("\n#####################################################################\n")
print("\n"+carrera + str(i+1) +"\n")
try:
os.mkdir('Archivos/' + carrera + '/' + carrera + str(i+1))
except Exception as e:
print("Error 3 ", e)
pass
#carrera = "INGENIERÍA CIVIL"
try:
final = final.fillna(0)
final = final.replace([np.inf, -np.inf], 0)
featureSelection[carrera + "" +str(i+1)] = features.selection(final, carrera, i)
accuracy, auc_stat, log_loss, description = stats_pred.statistics(final, carrera)
description.to_excel('Archivos/' + carrera + '/' + carrera + str(i+1) + '/' + "description.xlsx")
stats[carrera + "" +str(i+1)] = 0.1
stats[carrera + "" +str(i+1)]["Accuracy"] = accuracy
stats[carrera + "" +str(i+1)]["AUC"] = auc_stat
stats[carrera + "" +str(i+1)]["Log_loss"] = log_loss
except Exception as e:
print("Error 4 ", e)
stats[carrera + "" +str(i+1)] = 0.0
print("Error")
try:
#featureSelectionRF[carrera + "" +str(i+1)] = features.selection(finalTot, carrera)
#finalData1 = predict.predict(final, carrera)
finalData1 = predictEnsemble.predict(final, carrera, i)
finalData1 = finalData1.merge(alum1Sem, on=['id','codigo_carrera'], how='inner')
abandSemX = finalData1[finalData1['dropout'] == 0]
gradSemX = finalData1[finalData1['dropout'] == 1]
abandonoXSem['dropoutSem'+str(i+1)][carrera] = abandSemX['id'].count()
gradXSem['gradSem'+str(i+1)][carrera] = gradSemX['id'].count()
finalData1['Accuracy'] = accuracy
datosBD = pd.concat([datosBD, finalData1],axis = 0)
except Exception as e:
print("Error 5 ", e)
print("Error predict")
carrerasSinActivos = carrerasSinActivos + carrera+ str(i+1)+ ","
final1 = final[final["nombre_carrera"] == carrera]
final1["codigo_carrera"] = final1["codigo_carrera"].astype(int)
final1 = final1.merge(alum1Sem, on=['id','codigo_carrera'], how='inner')
final1['Accuracy'] = 0.1
datosBD = pd.concat([datosBD, final1],axis = 0, sort = True)
datosBD["dropout"] = datosBD["dropout"].replace("3","0").astype(str)
datosBD.rename(columns={'rate':'dropoutCarrera'}, inplace=True)
datosBD.rename(columns={'dropout':'dropout_RF'}, inplace=True)
datosBD.rename(columns={'dropout_avrg':'dropout'}, inplace=True)
datosBD.drop_duplicates(subset='id', inplace =True)
datosBD["dropout_RF"] = datosBD["dropout_RF"].fillna(-999).astype(str)
##Upload results to the database
print("Subiendo probabilidad de abandono")
abandonoXSem['carreras'] = abandonoXSem.index
engine = create_engine('postgresql://lala:PB2Cx3fDEgfFTpPn@172.16.101.55:5432/lala')
datosBD.to_sql('dropoutporsemestres', engine,if_exists = 'replace', index=False)
abandonoXSem.to_sql('abandono', engine,if_exists = 'replace', index=False)
print("Predicción acabada.")
try:
os.mkdir('Resultados/')
except Exception as e:
print("Error 5 ", e)
print("Error predict")
pass
stats.to_excel("Resultados/metricas.xlsx")
datosBD.to_excel("Resultados/predicciones.xlsx")
abandonoXSem.reset_index(level=0, inplace=True)
abandonoXSem.to_excel("Resultados/dropoutNumberPerSemesterAndDegree.xlsx")
gradXSem.reset_index(level=0, inplace=True)
gradXSem.to_excel("Resultados/graduateNumberPerSemesterAndDegree.xlsx")
|
[
"santywin@gmail.com"
] |
santywin@gmail.com
|
c38d76a201b1ed71fbbbec2cf297732fed8cabf7
|
d54c097b370c23855870ce1608c636c3fc972d40
|
/app.py
|
b06f2f67cb614addf6686666d292f6dd20e0b4de
|
[] |
no_license
|
NirmalaYarra/first-test-repo
|
132331d10d14338a3c83f27ed19cd15a6dd78527
|
08e1b5e860cc49f16011110ef65def6ad684d60a
|
refs/heads/master
| 2022-12-31T04:28:47.847912
| 2020-10-19T09:04:50
| 2020-10-19T09:04:50
| 304,389,798
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
from flask import Flask,request
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
items = []
class Item(Resource):
    def get(self, name):
        item = next(filter(lambda x: x["name"] == name, items), None)
        return {"item": item}, 200 if item else 404
    def post(self, name):
        if next(filter(lambda x: x["name"] == name, items), None) is not None:
            return {"message": "item with name {} already exists".format(name)}, 400
        data = request.get_json()
        item = {"name": data["name"], "price": data["price"]}
        items.append(item)
        return item, 201
class Itemlist(Resource):
def get(self):
return {"items": items}
api.add_resource(Item, "/item/<string:name>")
api.add_resource(Itemlist,"/item")
app.run(debug=True)
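# Usage sketch (illustrative; Flask's default development port is 5000):
#   curl -X POST http://127.0.0.1:5000/item/chair \
#        -H "Content-Type: application/json" -d '{"name": "chair", "price": 9.99}'
#   curl http://127.0.0.1:5000/item/chair
#   curl http://127.0.0.1:5000/item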
|
[
"Prettyboy@As-MacBook-Pro.local"
] |
Prettyboy@As-MacBook-Pro.local
|
3361f066f273aebe570ccb9b43cb5ddabf1462e8
|
0f667da9f75d40bde8f7b214252f593c65d995e8
|
/models/end_task_base_model.py
|
d2e1cd6895d37eb62fc338539d84d7d03ff412f0
|
[
"Apache-2.0"
] |
permissive
|
gabrielsluz/vince
|
bf57095753a574bd0a517e30960d799892d80ee1
|
f4e17a2cf70c080a7e01e46d15537e33224c869b
|
refs/heads/master
| 2022-12-29T05:02:39.785180
| 2020-10-14T18:46:51
| 2020-10-14T18:46:51
| 296,189,600
| 0
| 0
|
Apache-2.0
| 2020-09-17T01:43:49
| 2020-09-17T01:43:44
| null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
import abc
from typing import Optional, Dict, Tuple
import numpy as np
import torch
from models.base_model import BaseModel
class EndTaskBaseModel(BaseModel, abc.ABC):
def loss(self, network_outputs: Optional[Dict]) -> Dict[str, Optional[Tuple[float, torch.Tensor]]]:
raise NotImplementedError
def get_metrics(self, network_outputs: Optional[Dict]) -> Dict[str, Optional[float]]:
raise NotImplementedError
def get_image_output(self, network_outputs) -> Dict[str, np.ndarray]:
raise NotImplementedError
|
[
"xkcd@cs.washington.edu"
] |
xkcd@cs.washington.edu
|
06c2df76eff3179105e706f44548eaad7bca81d2
|
8b00abf4cc113c291971f55c4d4ba70bbf6bb1aa
|
/lesson_06/decoTest.py
|
03021940788ed7171b766814869105f0c836c03e
|
[] |
no_license
|
leisuchu/python
|
2a20619afe64afc81c0663ea518747b56beeb659
|
e2f0acd21482e8d9c87f0ffa7936cc57efc057a9
|
refs/heads/master
| 2022-11-27T22:48:36.102571
| 2020-08-06T13:09:53
| 2020-08-06T13:09:53
| 284,967,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
# Decorator practice: time a function's execution
import time
def spendTime(func):
    def wrapper():
        start = time.time()
        func()
        end = time.time()
        return end - start  # note: the wrapper returns the elapsed time, not func's result
    return wrapper
@spendTime
def ctForLoop():
    ls = []
    for i in range(1000000):
        ls.append(i)
@spendTime
def newForLoop():
    [i for i in range(1000000)]
c = ctForLoop()
n = newForLoop()
print('old: ', c, 'new: ', n)
|
[
"363126838@qq.com"
] |
363126838@qq.com
|
4e46b239e76f67a7f5b3f691893a6d24bf7113d4
|
9f1ad344da83b4b02a05bc4ba7e163b957191323
|
/son-fsm-examples/monitoring/test/alert.py
|
7e0e148b67f10811b4566892bceeaec5baa1b2f3
|
[
"Apache-2.0"
] |
permissive
|
mehraghdam/son-sm
|
3340e6c84da0ef157b9db6cd93363765f5a29e0a
|
f276018856ce9332b2d3530bab6a56989c991028
|
refs/heads/master
| 2020-05-25T18:51:42.681954
| 2017-03-02T15:43:03
| 2017-03-02T15:43:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,623
|
py
|
"""
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import logging
import json
import time
import os
from sonmanobase import messaging
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("son-mano-fakeslm")
LOG.setLevel(logging.DEBUG)
logging.getLogger("son-mano-base:messaging").setLevel(logging.INFO)
class fakealert(object):
def __init__(self):
self.name = 'fake-alert'
self.version = '0.1-dev'
self.description = 'description'
LOG.info("Starting alert:...")
# create and initialize broker connection
self.manoconn = messaging.ManoBrokerRequestResponseConnection(self.name)
self.path_descriptors = 'test/test_descriptors/'
self.end = False
self.publish_nsd()
self.run()
def run(self):
# go into infinity loop
while self.end == False:
time.sleep(1)
def publish_nsd(self):
LOG.info("Sending alert request")
message = {"exported_instance": "fw-vnf","core": "cpu","group": "development","exported_job": "vnf","value": "1",
"instance": "pushgateway:9091","job": "sonata","serviceID": "263fd6b7-8cfb-4149-b6c1-fb082553ca71",
"alertname": "mon_rule_vm_cpu_usage_85_perc","time": "2016-09-13T17:29:22.807Z","inf": "None","alertstate": "firing",
"id": "01ccc69f-c925-42f5-9418-e1adb075527e","monitor": "sonata-monitor"}
self.manoconn.publish('son.monitoring',json.dumps(message))
def main():
fakealert()
if __name__ == '__main__':
main()
|
[
"hadi_ksr_2004@yahoo.com"
] |
hadi_ksr_2004@yahoo.com
|
5ad6e2afea33ec765d6f2204acc87b47403a5697
|
804d501d0e12981dd9531e21f98c4362c88a820f
|
/SevenWAYS/SevenWAYS/settings.py
|
92bc20c0729b716e55251af86957713b1a5af8b4
|
[] |
no_license
|
Farad2020/SevenWAYS
|
1157c1f3c91803c1fefc4430aa236409e935232d
|
849681b8a55b71089140bd8cf044c2a2d14a3a40
|
refs/heads/master
| 2022-11-21T10:54:42.807414
| 2020-07-17T23:57:47
| 2020-07-17T23:57:47
| 280,533,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,276
|
py
|
"""
Django settings for SevenWAYS project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's1c7b#)^(u6h+b*ms_wkxa5$evvg)q%r2-9_%*mim2b#7zd8_n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'MainApp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SevenWAYS.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SevenWAYS.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'Media')
MEDIA_URL = '/media/'
|
[
"fnirc@mail.ru"
] |
fnirc@mail.ru
|
847a494f7e3525d879724351003eb1bde72ac82d
|
45e49fe63320b8dc0b1e7f9f82896b2ac4163a95
|
/test_helpers/tf_test_hit.py
|
dd8a28ca113a99ed87f4c5bec8853dafd7931b76
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
MyOwnClone/tweak_af
|
67b6a4a0d0171e1ffa5278fdc853a4410a298f21
|
d18ced0522a980e52edf65792f897ecc58e7b33d
|
refs/heads/master
| 2022-12-15T10:53:54.658058
| 2020-09-14T19:16:50
| 2020-09-14T19:16:50
| 292,057,648
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from tweak_af import tf
def dummy_function():
print("I quit!")
return 42
while True:
returned_value = tf(lambda: dummy_function())
if returned_value == 42:
exit(0)
|
[
"tomas.vymazal@gmail.com"
] |
tomas.vymazal@gmail.com
|
060350e3a8da78b116c73280d804e9458e73c258
|
4ca44b7bdb470fcbbd60c2868706dbd42b1984c9
|
/20.03.29/2016_calendar_5515.py
|
cad16966dac33cc4e9e100aaa95a513481fde389
|
[] |
no_license
|
titiman1013/Algorithm
|
3b3d14b3e2f0cbc4859029eb73ad959ec8778629
|
8a67e36931c42422779a4c90859b665ee468255b
|
refs/heads/master
| 2023-06-29T17:04:40.015311
| 2021-07-06T01:37:29
| 2021-07-06T01:37:29
| 242,510,483
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
import sys; sys.stdin = open('2016_calendar.txt', 'r')
for tc in range(1, int(input())+1):
m, d = map(int, input().split())
    # Jan 1, 2016 is a Friday / February runs through the 29th (leap year)
    # Mon Tue Wed Thu ... => 0 1 2 3 ...
list_31 = [1, 3, 5, 7, 8, 10, 12]
list_30 = [4, 6, 9, 11]
list_29 = [2]
start = 4
    # add up the days of each full month that has already passed
for i in range(1, m):
if i in list_31:
start += 31
elif i in list_30:
start += 30
elif i in list_29:
start += 29
start += d
print(f'#{tc} {(start-1)%7}')
# print((366-1)%7)
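# Worked check (not in the original file): m=3, d=1 -> start = 4 + 31 + 29 + 1 = 65,
# and (65 - 1) % 7 = 1 -> Tuesday; March 1, 2016 was indeed a Tuesday.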
|
[
"hyunsukr1013@gmail.com"
] |
hyunsukr1013@gmail.com
|
45d19ee45e16fcf2ba6377c39776d4d1745ff31a
|
171ea5737fa9cb0ef763e0ba20847e1ba61c0199
|
/assignment2/ie590/layer_utils.py
|
88538bb217dc260dced7a903168bc2b84b4f8e54
|
[] |
no_license
|
ShuzhanSun/IE59000_Deep_Learning_In_Machine_Vision
|
0be6d2eefe3ed203c385b0995cca426b89acb566
|
ef9c823277c69d4f25487a4db3c48fddd1ec1107
|
refs/heads/master
| 2022-12-08T10:31:50.314933
| 2019-12-16T03:01:35
| 2019-12-16T03:01:35
| 209,534,434
| 0
| 0
| null | 2022-11-22T02:41:19
| 2019-09-19T11:17:18
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,487
|
py
|
from ie590.layers import *
from ie590.fast_layers import *
def affine_leaky_relu_forward(x, w, b):
"""
    Convenience layer that performs an affine transform followed by a Leaky ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
Returns a tuple of:
- out: Output from the Leaky ReLU
- cache: Object to give to the backward pass
"""
a, fc_cache = affine_forward(x, w, b)
out, leaky_relu_cache = leaky_relu_forward(a)
cache = (fc_cache, leaky_relu_cache)
return out, cache
def affine_leaky_relu_backward(dout, cache):
"""
Backward pass for the affine-leaky_relu convenience layer
"""
fc_cache, leaky_relu_cache = cache
da = leaky_relu_backward(dout, leaky_relu_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db
def affine_bn_leaky_relu_forward(x, w, b, gamma, beta, bn_param):
"""
Convenience layer that performs an affine transform - a batch normalization - a Leaky ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
    - gamma, beta: parameters used in the batch normalization
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
    - running_var: Array of shape (D,) giving running variance of features
"""
a, fc_cache = affine_forward(x, w, b)
a, bn_cache = batchnorm_forward(a, gamma, beta, bn_param)
out, leaky_relu_cache = leaky_relu_forward(a)
cache = (fc_cache, bn_cache, leaky_relu_cache)
return out, cache
def affine_bn_leaky_relu_backward(dout, cache):
"""
Backward pass for the affine_bn_leaky_relu convenience layer
"""
fc_cache, bn_cache, leaky_relu_cache = cache
da = leaky_relu_backward(dout, leaky_relu_cache)
da, dgamma, dbeta = batchnorm_backward_alt(da, bn_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db, dgamma, dbeta
def affine_ln_leaky_relu_forward(x, w, b, gamma, beta, bn_param):
"""
Convenience layer that performs an affine transform - a layer normalization - a Leaky ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
    - gamma, beta: parameters used in the layer normalization
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
    - running_var: Array of shape (D,) giving running variance of features
"""
a, fc_cache = affine_forward(x, w, b)
a, bn_cache = layernorm_forward(a, gamma, beta, bn_param)
out, leaky_relu_cache = leaky_relu_forward(a)
cache = (fc_cache, bn_cache, leaky_relu_cache)
return out, cache
def affine_ln_leaky_relu_backward(dout, cache):
"""
Backward pass for the affine_ln_leaky_relu convenience layer
"""
fc_cache, bn_cache, leaky_relu_cache = cache
da = leaky_relu_backward(dout, leaky_relu_cache)
da, dgamma, dbeta = layernorm_backward(da, bn_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db, dgamma, dbeta
def affine_relu_forward(x, w, b):
"""
    Convenience layer that performs an affine transform followed by a ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, fc_cache = affine_forward(x, w, b)
out, relu_cache = relu_forward(a)
cache = (fc_cache, relu_cache)
return out, cache
def affine_relu_backward(dout, cache):
"""
Backward pass for the affine-relu convenience layer
"""
fc_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db
def conv_relu_forward(x, w, b, conv_param):
"""
A convenience layer that performs a convolution followed by a ReLU.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
out, relu_cache = relu_forward(a)
cache = (conv_cache, relu_cache)
return out, cache
def conv_relu_backward(dout, cache):
"""
Backward pass for the conv-relu convenience layer.
"""
conv_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
def conv_bn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param):
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
an, bn_cache = spatial_batchnorm_forward(a, gamma, beta, bn_param)
out, relu_cache = relu_forward(an)
cache = (conv_cache, bn_cache, relu_cache)
return out, cache
def conv_bn_relu_backward(dout, cache):
conv_cache, bn_cache, relu_cache = cache
dan = relu_backward(dout, relu_cache)
da, dgamma, dbeta = spatial_batchnorm_backward(dan, bn_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db, dgamma, dbeta
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
s, relu_cache = relu_forward(a)
out, pool_cache = max_pool_forward_fast(s, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache
def conv_relu_pool_backward(dout, cache):
"""
Backward pass for the conv-relu-pool convenience layer
"""
conv_cache, relu_cache, pool_cache = cache
ds = max_pool_backward_fast(dout, pool_cache)
da = relu_backward(ds, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
|
[
"sun630@purdue.edu"
] |
sun630@purdue.edu
|
d04e54f5130361e3fe585a2ba187137808d4ab98
|
9292ccf53497f168e9e7db404c8ca9bc906f5e95
|
/Geometry/223_rectangleArea.py
|
a88e3cd50da3f75aeffa36bb8823589e963a9a41
|
[] |
no_license
|
ZheyuWalker/Leetcode-in-python
|
510a25eded77fe2e6d8192b15da9769b445e62f1
|
d99019d07b2178bb17f0d1f7a6eafc69c2e5cdd9
|
refs/heads/master
| 2023-01-23T17:02:09.412318
| 2023-01-21T14:48:46
| 2023-01-21T14:48:46
| 150,246,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
'''
Given the coordinates of two rectilinear rectangles in a 2D plane, return the total area covered by the two rectangles.
The first rectangle is defined by its bottom-left corner (ax1, ay1) and its top-right corner (ax2, ay2).
The second rectangle is defined by its bottom-left corner (bx1, by1) and its top-right corner (bx2, by2).
'''
class Solution:
def computeArea(self, ax1: int, ay1: int, ax2: int, ay2: int, bx1: int, by1: int, bx2: int, by2: int) -> int:
if ax1 < bx1:
left_box_x = (ax1, ax2)
right_box_x = (bx1, bx2)
else:
left_box_x = (bx1, bx2)
right_box_x = (ax1, ax2)
if ay1 < by1:
down_box_y = (ay1, ay2)
up_box_y = (by1, by2)
else:
down_box_y = (by1, by2)
up_box_y = (ay1, ay2)
area1 = (ay2-ay1) * (ax2-ax1)
area2 = (by2-by1) * (bx2-bx1)
        # no overlap when the left box's right edge <= the right box's left edge,
        # or the lower box's top edge <= the upper box's bottom edge
if left_box_x[1] <= right_box_x[0] or down_box_y[1] <= up_box_y[0]:
common_area = 0
else:
x_len = min(left_box_x[1], right_box_x[1]) - right_box_x[0]
y_len = min(down_box_y[1], up_box_y[1]) - up_box_y[0]
common_area = x_len * y_len
return area1 + area2 - common_area
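# Hedged alternative sketch (not in the original file): the same overlap area can
# be computed without the box-ordering bookkeeping above by clamping the
# intersection extents at zero.
def compute_area_clamped(ax1, ay1, ax2, ay2, bx1, by1, bx2, by2):
    # width/height of the intersection rectangle, clamped to 0 when disjoint
    overlap_w = max(0, min(ax2, bx2) - max(ax1, bx1))
    overlap_h = max(0, min(ay2, by2) - max(ay1, by1))
    return (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - overlap_w * overlap_h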
|
[
"zheyu_chaser@foxmail.com"
] |
zheyu_chaser@foxmail.com
|
143612b36b65d1518138ee99aaacbdfe96a36ab9
|
c1c9d9e52727a3480a4f3255f53920d9c78b68ba
|
/cis498/mongodb/customers.py
|
1e7d0ff8af630be1cff20bdc591b6af12eca7347
|
[] |
no_license
|
carlosmoreno2020/cis498Final
|
f6ee4afe699adcf3a4fe87fbfbb94b93d5ebcdf9
|
757802e8e7f8a1b060d7608a65b59fe0ca079202
|
refs/heads/master
| 2022-07-14T18:30:15.777912
| 2020-05-12T00:21:16
| 2020-05-12T00:21:16
| 263,181,120
| 0
| 0
| null | 2020-05-12T00:12:11
| 2020-05-11T23:22:31
| null |
UTF-8
|
Python
| false
| false
| 1,957
|
py
|
from cis498.mongodb.mongoclient import MongoClientHelper
class Customers:
# Established mongo connection and customers collection
def __init__(self):
self.mc = MongoClientHelper()
self.customers_db = self.mc.db['customers']
def createCustomer(self, form):
name = form.cleaned_data.get('name')
email = form.cleaned_data.get('email').lower()
address = form.cleaned_data.get('address')
phone_number = form.cleaned_data.get('phonenumber')
customer = {"name": name,
"email": email,
"address": address,
"phonenumber": phone_number}
self.customers_db.insert_one(customer)
# def generateCustomerReport(dateRange)
# def getCustomerHistory()
    # Updates the customer's order list to maintain order history
def updateCustomerOrders(self, email, uuid):
customer_query = {"email": email.lower()}
customer = self.customers_db.find_one(customer_query)
        # Try appending to an existing order list; a KeyError means this is the customer's first order
try:
customer['orders'].append(uuid)
self.customers_db.update_one(customer_query, {"$set": {"orders": customer['orders']}})
except KeyError:
uuidList = [uuid]
self.customers_db.update_one(customer_query, {"$set": {"orders": uuidList}})
def findCustomerByEmail(self, email):
customer_query = {"email": email.lower()}
result = self.customers_db.find_one(customer_query)
return Customer(result['name'], result['email'], result['address'], result['phonenumber'], result['orders'])
class Customer:
def __init__(self, name, email, address, phone_number, orders):
self.name = name
self.email = email
self.address = address
self.phone_number = phone_number
self.orders = orders
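# Hedged design note (not in the original file): updateCustomerOrders above does a
# read-modify-write in Python; MongoDB's $push expresses the same append atomically
# and creates the "orders" array if it does not exist yet. A sketch against the
# same collection:
def update_customer_orders_atomic(customers_db, email, uuid):
    # single atomic server-side append; no try/except needed for the first order
    customers_db.update_one({"email": email.lower()}, {"$push": {"orders": uuid}})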
|
[
"kridler240@gmail.com"
] |
kridler240@gmail.com
|
303a75cf73d6d544d98ad4d47583f62fd528836d
|
8fef7e2eced3ac03018d56cb2c2026ef17b98fda
|
/lab_14_1.py
|
f23690d52fae5c505e7ccc893fadf23d6a5e3468
|
[] |
no_license
|
Igor-Polatajko/python-labs
|
089536edf79a35e29fd613733c052314fb159b09
|
091b5c5d798414889c1919d6507373d848d57614
|
refs/heads/master
| 2020-08-31T22:44:34.529776
| 2020-01-04T00:25:49
| 2020-01-04T00:25:49
| 218,804,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,249
|
py
|
#!/usr/bin/env python
import sqlite3
ITEM_PRIORITY = {'1': 'HIGH (1)', '2': 'NORMAL (2)', '3': 'LOW (3)'}
class ToDoItem:
def __init__(self, item_id, title, content, priority, completed):
self.item_id = item_id
self.title = title
self.content = content
self.priority = priority
self.completed = completed
@staticmethod
def db_store(method):
def wrapper(*args, **kwargs):
method(*args, **kwargs)
DbConnector().db.commit()
return wrapper
@staticmethod
def db_read(method):
def wrapper(*args, **kwargs):
results = method(*args, **kwargs).fetchall()
return [ToDoItem(result[0], result[1], result[2], result[3], result[4]) for result in results]
return wrapper
class DbConnector:
_instance = None
def __new__(cls):
if not cls._instance:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self):
if hasattr(self, 'db'):
return
self.db = sqlite3.connect("todo_items")
self.db.cursor().execute(
'CREATE TABLE IF NOT EXISTS todo_items ('
'id integer PRIMARY KEY,'
'title text NOT NULL,'
'content text NOT NULL,'
'priority text NOT NULL,'
'completed integer NOT NULL)')
self.db.commit()
class ToDoItemDao:
@ToDoItem.db_store
def __init__(self):
self.db = DbConnector().db
@ToDoItem.db_read
def find_by_field(self, field_name, field_value):
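        # NOTE: field_name is interpolated directly into the SQL string, so only
        # trusted column names may be passed; the value itself is parameterized.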
return self.db.cursor().execute(f"SELECT * FROM todo_items WHERE {field_name} LIKE ?", (f'%{field_value}%',))
@ToDoItem.db_read
def find_all(self):
return self.db.cursor().execute("SELECT * FROM todo_items")
@ToDoItem.db_store
def create(self, item):
self.db.cursor().execute("INSERT INTO todo_items (title, content, priority, completed)"
" VALUES (?, ?, ?, ?)", (item.title, item.content, item.priority, item.completed))
@ToDoItem.db_store
def update(self, item):
self.db.cursor().execute("UPDATE todo_items SET title = ?, content = ?, priority = ?, "
"completed = ? WHERE id = ?",
(item.title, item.content, item.priority,
item.completed, item.item_id))
@ToDoItem.db_store
def delete(self, item_id):
self.db.cursor().execute("DELETE FROM todo_items WHERE id = ?", (item_id,))
class View:
def show_hint(self):
print('\n###############################################\n'
'type [help] - to see all the commands available\n'
'type [exit] - to exit\n'
'###############################################\n')
def show_help(self):
print('\n################ Commands ####################\n'
'list - list all the items\n'
'add - add new todo\n'
'rm item_id - remove todo\n'
'find field value - find by field value\n'
              'update item_id - update an existing todo\n'
'complete item_id - mark as finished\n'
'uncomplete item_id - unmark finished\n'
'help - see all the commands available\n'
'exit - exit the program\n'
'###############################################\n')
def item_view(self, item=None):
old_title = ''
old_content = ''
old_priority = ''
completed = 'No'
if item:
old_title = f' ({item.title})'
old_content = f' ({item.content})'
old_priority = f' ({item.priority})'
completed = item.completed
view_type = "Update" if item else "Add"
print(f"### {view_type} item: ###")
title = input(f"Enter title{old_title}: ")
content = input(f"Enter content{old_content}: ")
while True:
priority = input(f"Enter priority{old_priority}: ")
if priority in ('1', '2', '3'):
break
print("Error! Valid priority values: 1, 2, 3")
return ToDoItem(None, title, content, ITEM_PRIORITY[priority], completed)
def show_list(self, data):
if len(data) < 1:
print("Collection is empty!")
return
headers = {d: self._get_max_length(data, d, len(d)) for d in data[0].__dict__.keys()}
print('##### List #####')
for header in headers.keys():
print(f"{header.capitalize():<{headers[header]}} | ", end='')
print()
for d in data:
for header in headers.keys():
d_dict = d.__dict__
if header in d_dict.keys():
col = d_dict[header]
else:
col = ''
print(f"{col:<{headers[header]}} | ", end='')
print()
print()
def _get_max_length(self, items, field, header_length):
items_dicts = [item.__dict__ for item in items]
length_list = [len(str(item[field])) for item in items_dicts if field in item.keys()]
length_list.append(header_length)
return max(length_list)
class ViewHandler:
DB_COLUMNS_ADAPTER = {'item_id': 'id', 'title': 'title', 'content': 'content',
'priority': 'priority', 'completed': 'completed'}
def __init__(self, view, todo_dao):
self._view = view
self._todo_dao = todo_dao
def handle(self, user_input):
i = user_input.strip().split() # user_input_parts
if len(i) == 0:
return
if i[0] == 'exit':
exit(0)
elif i[0] == 'help':
self._view.show_help()
elif i[0] == 'add':
item = self._view.item_view()
self._todo_dao.create(item)
elif i[0] == 'list':
self._view.show_list(self._todo_dao.find_all())
elif i[0] == 'rm':
if len(i) > 1:
self._todo_dao.delete(i[1])
else:
print("Incorrect command syntax!")
self._view.show_hint()
elif i[0] == 'update':
def _update(item):
updated_item = self._view.item_view(item)
updated_item.item_id = item.item_id
self._todo_dao.update(updated_item)
self.__update(i, _update)
elif i[0] == 'complete':
def _check(item):
item.completed = 'YES'
self._todo_dao.update(item)
self.__update(i, _check)
elif i[0] == 'uncomplete':
def _uncheck(item):
item.completed = 'NO'
self._todo_dao.update(item)
self.__update(i, _uncheck)
elif i[0] == 'find':
if len(i) > 2:
field = i[1].lower()
if field in self.DB_COLUMNS_ADAPTER.keys():
items = self._todo_dao.find_by_field(self.DB_COLUMNS_ADAPTER[field], i[2])
self._view.show_list(items)
else:
print("Not valid field name")
print(f"Valid field names: {self.DB_COLUMNS_ADAPTER.keys()}")
else:
print("Incorrect command syntax!")
self._view.show_hint()
else:
print("Unknown command!")
self._view.show_hint()
def __update(self, req, update_handler):
if len(req) > 1:
item = self._todo_dao.find_by_field('id', req[1])
if item:
item = item[0]
update_handler(item)
else:
print('Item does not exist')
else:
print("Incorrect command syntax!")
self._view.show_hint()
class Controller:
def run(self):
view = View()
todo_dao = ToDoItemDao()
view_handler = ViewHandler(view, todo_dao)
view.show_hint()
while True:
user_input = input("> ")
view_handler.handle(user_input)
def main():
controller = Controller()
controller.run()
if __name__ == '__main__':
main()
|
[
"igor.bogdanovich39@gmail.com"
] |
igor.bogdanovich39@gmail.com
|
0f525147539707c4ee461b2a614a896a4ab24427
|
26d6c34df00a229dc85ad7326de6cb5672be7acc
|
/msgraph-cli-extensions/beta/schemaextensions_beta/azext_schemaextensions_beta/generated/custom.py
|
9ed2ef7a2cad12e4916e3c911ac6eb7d1a783fc8
|
[
"MIT"
] |
permissive
|
BrianTJackett/msgraph-cli
|
87f92471f68f85e44872939d876b9ff5f0ae6b2c
|
78a4b1c73a23b85c070fed2fbca93758733f620e
|
refs/heads/main
| 2023-06-23T21:31:53.306655
| 2021-07-09T07:58:56
| 2021-07-09T07:58:56
| 386,993,555
| 0
| 0
|
NOASSERTION
| 2021-07-17T16:56:05
| 2021-07-17T16:56:05
| null |
UTF-8
|
Python
| false
| false
| 4,071
|
py
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
def schemaextensions_schema_extension_schema_extension_create_schema_extension(client,
id_=None,
description=None,
owner=None,
properties=None,
status=None,
target_types=None):
body = {}
body['id'] = id_
body['description'] = description
body['owner'] = owner
body['properties'] = properties
body['status'] = status
body['target_types'] = target_types
return client.create_schema_extension(body=body)
def schemaextensions_schema_extension_schema_extension_delete_schema_extension(client,
schema_extension_id,
if_match=None):
return client.delete_schema_extension(schema_extension_id=schema_extension_id,
if_match=if_match)
def schemaextensions_schema_extension_schema_extension_list_schema_extension(client,
orderby=None,
select=None,
expand=None):
return client.list_schema_extension(orderby=orderby,
select=select,
expand=expand)
def schemaextensions_schema_extension_schema_extension_show_schema_extension(client,
schema_extension_id,
select=None,
expand=None):
return client.get_schema_extension(schema_extension_id=schema_extension_id,
select=select,
expand=expand)
def schemaextensions_schema_extension_schema_extension_update_schema_extension(client,
schema_extension_id,
id_=None,
description=None,
owner=None,
properties=None,
status=None,
target_types=None):
body = {}
body['id'] = id_
body['description'] = description
body['owner'] = owner
body['properties'] = properties
body['status'] = status
body['target_types'] = target_types
return client.update_schema_extension(schema_extension_id=schema_extension_id,
body=body)
|
[
"japhethobalak@gmail.com"
] |
japhethobalak@gmail.com
|
c54e85c876571404918d3ee5e9f03ce8a13da536
|
57a348eb0ebd82a7039a076445f21f10248b7c32
|
/setup.py
|
bfa6199c38f86f178bcd93208ee47843a7e4a4ea
|
[
"Python-2.0"
] |
permissive
|
blamarvt/cerealizer
|
8f0bd231213717c1aa98b12d1a98dc823c8805ce
|
75b53623a1537728a31d9595003e7c7e582600f3
|
refs/heads/master
| 2021-01-01T03:56:09.337596
| 2016-05-16T14:35:52
| 2016-05-16T14:35:52
| 58,739,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
#! /usr/bin/env python
# Cerealizer
# Copyright (C) 2005-2012 Jean-Baptiste LAMY
#
# This program is free software.
# It is available under the Python licence.
import os.path
import setuptools
import sys
setuptools.setup(
name = "Cerealizer",
version = "0.9.0",
license = "Python licence",
description = "A secure pickle-like module",
long_description = """A secure pickle-like module.
It supports basic types (int, string, unicode, tuple, list, dict, set, ...),
old and new-style classes (you need to register the class for security), object cycles,
and it can be extended to support C-defined types.""",
author = "Lamy Jean-Baptiste (Jiba)",
author_email = "jibalamy@free.fr",
url = "http://home.gna.org/oomadness/en/cerealizer/index.html",
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Python Software Foundation License",
"Programming Language :: Python :: 2",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Python Modules",
],
package_dir = {"cerealizer" : "."},
packages = ["cerealizer"],
include_package_data = True,
)
|
[
"brian.lamar@omnivore.io"
] |
brian.lamar@omnivore.io
|
0d88c5d4df30bb424004664f1772b2177effb895
|
8f34c6548eda6f97094036136cedbaafcaf099fc
|
/src/burks_sim/gaussianMixtures.py
|
021fb4ad96808f8c32d705aac4a6c50ffd235223
|
[
"MIT"
] |
permissive
|
sharanjeetsinghmago/online_reward_shaping
|
f7a4641541fc48ac3442a8ef0dbf35798cdcad09
|
55ac60f59ea6cc48fc8a788625aa50ff08453c1d
|
refs/heads/master
| 2023-08-07T03:10:37.202849
| 2019-08-22T20:32:06
| 2019-08-22T20:32:06
| 193,143,367
| 0
| 0
|
MIT
| 2023-07-22T08:58:53
| 2019-06-21T18:27:38
|
Python
|
UTF-8
|
Python
| false
| false
| 29,430
|
py
|
#!/usr/bin/env python
from __future__ import division
'''
***********************************************************
File: gaussianMixtures.py
Classes: GM,Gaussian
Allows for the creation, use, and compression of mixtures
of multivariate normals, or Gaussian Mixture Models (GMM).
Version 1.3.5: added normalized ISD
Version 1.3.6: removed warning filtering
Version 1.3.7: added pointEval function for gaussian
Version 1.3.8: added random mixture generation and fixed
scalerMulitply to scalarMultiply
***********************************************************
'''
__author__ = "Luke Burks"
__copyright__ = "Copyright 2016, Cohrint"
__credits__ = ["Luke Burks", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.3.8"
__maintainer__ = "Luke Burks"
__email__ = "luke.burks@colorado.edu"
__status__ = "Development"
from matplotlib.colors import LogNorm
import numpy as np;
import random;
from random import random;
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as mvn
import warnings
import math
import copy
import time
from numpy.linalg import inv,det
class Gaussian:
def __init__(self,u = None,sig = None,w=1):
#warnings.filterwarnings("ignore")
if(u is None):
self.mean = [0,0];
else:
self.mean = u;
if(sig is None):
            self.var = [[1,0],[0,1]];
else:
self.var = sig;
self.weight = w;
def display(self):
print("Mean: ");
print(self.mean);
print("Variance: ");
print(self.var);
print("Weight");
print(self.weight);
def fullComp(self,b):
if(not np.array_equal(self.mean,b.mean)):
return False;
if(not np.array_equal(self.var,b.var)):
return False;
if(self.weight != b.weight):
return False;
return True;
def pointEval(self,x):
'''
Evaluates the Gaussian at a point x
'''
        return self.weight*mvn.pdf(x,self.mean,self.var);
class GM:
def __init__(self,u=None,s=None,w=None):
'''
Initialize with either:
1. Nothing, empty mixture
2. Single values, mixture of size 1
3. Lists of values, mixture of size n
'''
self.Gs = [];
if(w == None):
self.size = 0;
elif(isinstance(w,float) or isinstance(w,int)):
self.size = 0;
self.addG(Gaussian(u,s,w));
elif(len(w) > 1):
for i in range(0,len(w)):
self.Gs += [Gaussian(u[i],s[i],w[i])];
self.size = len(self.Gs);
self.action = -1;
#Index Overrides
def __getitem__(self,key):
return self.Gs[key];
def __setitem__(self,key,value):
self.Gs[key] = value;
def getMeans(self):
'''
Returns a list containing the mean
of each mixand
'''
ans = [];
for g in self.Gs:
ans.append(g.mean);
return ans;
def getVars(self):
'''
Returns a list containing the variance
of each mixand
'''
ans = [];
for g in self.Gs:
ans.append(g.var);
return ans;
def getWeights(self):
'''
Returns a list containing the weights
of each mixand
'''
ans = [];
for g in self.Gs:
ans.append(g.weight);
return ans;
def makeRandomMixture(self,size=10,dims=2,perMax = 10,lowBound = 0, highBound = 10):
        #In case you forget, perMax refers to precision
for i in range(0,size):
self.addG(self.sampleWishart(dims,perMax,lowBound,highBound));
self.normalizeWeights();
def sampleWishart(self,dims,sigMax,lowBound,highBound):
sigPrior = np.diag(np.ones(dims))*sigMax;
df = dims;
cholesky = np.linalg.cholesky(sigPrior);
X = np.dot(cholesky,np.random.normal(size=(dims,df)));
sigma = np.linalg.inv(np.dot(X,X.T));
weight = np.random.random();
lowInit = [lowBound]*dims;
highInit = [highBound]*dims;
mu = [];
for i in range(0,dims):
mu.append(np.random.random()*(highInit[i]-lowInit[i]) + lowInit[i]);
sigma=sigma.tolist();
return Gaussian(mu,sigma,weight);
def clean(self):
for g in self.Gs:
if(not isinstance(g.mean,list) and not isinstance(g.mean,int) and not isinstance(g.mean,float)):
g.mean = g.mean.tolist();
if(not isinstance(g.var,list) and not isinstance(g.var,int) and not isinstance(g.var,float)):
g.var = g.var.tolist();
if(not isinstance(g.mean,int) and not isinstance(g.mean,float)):
while(len(g.mean) != len(g.var)):
g.mean = g.mean[0];
if(not isinstance(g.var,int) and not isinstance(g.var,float)):
for i in range(0,len(g.var)):
g.var[i][i] = abs(g.var[i][i]);
def findMAP2D(self):
'''
        Retrieves a 2D grid and returns the
maximum point
'''
[a,b,res] = self.plot2D(vis=False);
MAP= [0,0];
meanVal = [-10000];
for i in range(0,len(res)):
for j in range(0,len(res[i])):
if(res[i][j] > meanVal):
meanVal = res[i][j];
MAP = [i/20,j/20];
return MAP;
def findMAPN(self):
'''
Bad approximation for the MAP point of an N-dimensional GMM.
Returns the mixand mean with the highest contribution from all
mixands.
'''
cands = [0]*self.size;
for i in range(0,self.size):
for j in range(0,self.size):
cands[i] += mvn.pdf(self.Gs[i].mean,self.Gs[j].mean,self.Gs[j].var)*self.Gs[j].weight;
best = cands.index(max(cands));
return(self.Gs[best].mean);
def plot(self,low = -20,high = 20,num = 1000,vis = True):
'''
Plots a 1D GMM from low to high, with resolution=num.
If vis argument is false it returns the values at each point.
'''
a = np.linspace(low,high,num= num);
b = [0.0]*num;
for g in self.Gs:
b += mvn.pdf(a,g.mean,g.var)*g.weight;
if(vis):
plt.plot(a,b);
plt.show();
else:
return [a,b];
def plot2D(self,low = [0,0], high = [5,5],vis = True,res = 100,xlabel = 'Cop Belief',ylabel = 'Robber Belief',title = 'Belief'):
'''
Plots a contour plot of a 2D GMM from low to high in each dimension, with resolution=res.
If vis argument is false it returns the arguments required to plot in order of the
x values, the y values, and the calculated mixture values.
Note: This may not be very efficient depending on the resolution
'''
c = [[0 for i in range(0,res)] for j in range(0,res)];
x, y = np.mgrid[low[0]:high[0]:(float(high[0]-low[0])/res), low[1]:high[1]:(float(high[1]-low[1])/res)]
pos = np.dstack((x, y))
#self.clean();
for g in self.Gs:
try:
c += mvn.pdf(pos,g.mean,g.var)*g.weight;
except:
g.display();
raise;
if(vis):
fig,ax = plt.subplots();
ax.contourf(x,y,c,cmap = 'viridis');
#fig.colorbar();
ax.set_xlabel(xlabel);
ax.set_ylabel(ylabel);
ax.set_title(title);
plt.show();
else:
return x,y,c;
def slice2DFrom4D(self,low = [0,0],high = [5,5],res = 100, dims = [2,3],vis = True,retGS = False):
'''
Plots a 2D GMM from a 4D GMM by ignoring entries in the mean or variance not associated with those dimensions
Argument retGS = True will return the 2D GMM
Argument vis = True will plot the 2D GMM using the plot2D function
Otherwise the results are returned through the plot2D(vis=False) function.
'''
newGM = GM();
for g in self.Gs:
mean = [g.mean[dims[0]],g.mean[dims[1]]];
var = [[g.var[dims[0]][dims[0]],g.var[dims[0]][dims[1]]],[g.var[dims[1]][dims[0]],g.var[dims[1]][dims[1]]]]
weight = g.weight;
newGM.addG(Gaussian(mean,var,weight));
if(vis):
newGM.plot2D(low = low,high = high,res=res,vis = vis,xlabel = 'RobberX',ylabel = 'RobberY',title = 'Cops Belief of Robber');
elif(retGS):
return newGM;
else:
return newGM.plot2D(low = low,high = high,res=res,vis = vis,xlabel = 'RobberX',ylabel = 'RobberY',title = 'Cops Belief of Robber');
def normalizeWeights(self):
'''
Normalizes the weights of the mixture such that they all add up to 1.
'''
suma = 0;
for g in self.Gs:
suma += g.weight;
for g in self.Gs:
g.weight = g.weight/suma;
self.size = len(self.Gs);
def addGM(self,b):
'''
Combines a new mixture with this one.
'''
for i in range(0,len(b.Gs)):
self.addG(b.Gs[i]);
self.size = len(self.Gs);
def addNewG(self,mean,var,weight):
'''
Adds another mixand to this mixture by specifying the parameters of the Gaussian.
'''
self.addG(Gaussian(mean,var,weight));
def addG(self,b):
'''
Adds another mixand to this mixture by specifying the Gaussian directly
'''
self.Gs += [b];
self.size+=1;
self.size = len(self.Gs);
def display(self):
print("Means");
print([self.Gs[i].mean for i in range(0,self.size)]);
print("Variances");
print([self.Gs[i].var for i in range(0,self.size)]);
print("Weights");
print([self.Gs[i].weight for i in range(0,self.size)]);
if(self.action is not None):
print("Action");
print(self.action);
def fullComp(self,b):
'''
Compares two GMMs. If they are identical, return true,
else return false.
Works for the general case
'''
if(self.size != b.size):
return False;
for i in range(0,self.size):
if(not np.array_equal(self.Gs[i].mean,b.Gs[i].mean)):
return False;
if(not np.array_equal(self.Gs[i].var,b.Gs[i].var)):
return False;
if(self.Gs[i].weight != b.Gs[i].weight):
return False;
return True;
def pointEval(self,x):
'''
        Evaluates the GMM at a point x by summing together each mixand's contribution
'''
suma = 0;
self.clean();
for g in self.Gs:
suma += g.weight*mvn.pdf(x,g.mean,g.var);
return suma;
def distance(self,a,b):
#General N-dimensional euclidean distance
dist = 0;
for i in range(0,len(a)):
dist += (a[i]-b[i])**2;
dist = math.sqrt(dist);
return dist;
def ISD(self,g2,normed=True):
#Integrated Squared Difference
#From "Cost-Function-Based Gaussian Mixture Reduction for Target Tracking"
#by Jason Williams, Peter Maybeck
        #Normalized Integrated Squared Difference
#From "Gaussian Mixture reduction based on fuzzy adaptive resonance theory for extended target tracking", 2013
#NISD(g1,g2) = sqrt((int(g1^2) - 2int(g1*g2) + int(g2^2)) / (int(g1^2) + int(g2^2)))
#NISD(g1,g2) = sqrt((int(Jhh) - 2int(Jhr) + int(Jrr)) / (int(Jhh) + int(Jrr)))
#Js = Jhh - 2Jhr + Jrr
#Jhh = self-likeness for g1
#Jhr = cross-likeness
#Jrr = self-likeness for g2
Jhh = 0;
for g in self.Gs:
for h in self.Gs:
Jhh += g.weight*h.weight*mvn.pdf(g.mean,h.mean,np.matrix(g.var) + np.matrix(h.var));
Jrr = 0;
for g in g2.Gs:
for h in g2.Gs:
Jrr += g.weight*h.weight*mvn.pdf(g.mean,h.mean,np.matrix(g.var) + np.matrix(h.var));
Jhr = 0;
for g in self.Gs:
for h in g2.Gs:
Jhr += g.weight*h.weight*mvn.pdf(g.mean,h.mean,np.matrix(g.var) + np.matrix(h.var));
if(normed):
Js = np.sqrt((Jhh-2*Jhr+Jrr)/(Jhh+Jrr));
else:
Js = Jhh-2*Jhr+Jrr;
return Js;
#General N-dimensional
def kmeansCondensationN(self,k=10,lowInit=None,highInit = None,maxIter = 100):
'''
Condenses mixands by first clustering them into k groups, using
k-means. Then each group is condensed to a single
Gaussian using Runnalls Method. Each Gaussian is then added to a new GMM.
Has a tendency to overcondense
Inputs:
k: number of mixands in the returned GMM
lowInit: lower bound on the placement of initial grouping means
highInit: upper bound on placement of initial grouping means
'''
if(self.size <= k):
return self;
if(lowInit == None):
lowInit = [0]*len(self.Gs[0].mean);
if(highInit == None):
highInit = [5]*len(self.Gs[0].mean)
#Initialize the means. Spread randomly through the bounded space
means = [0]*k;
for i in range(0,k):
tmp = [];
if(isinstance(self.Gs[0].mean,list)):
for j in range(0,len(self.Gs[0].mean)):
tmp.append(random()*(highInit[j]-lowInit[j]) + lowInit[j]);
else:
tmp.append(random()*(highInit-lowInit) + lowInit);
means[i] = tmp;
converge = False;
count = 0;
newMeans = [0]*k;
while(converge == False and count < maxIter):
clusters = [GM() for i in range(0,k)];
for g in self.Gs:
#put the gaussian in the cluster which minimizes the distance between the distribution mean and the cluster mean
if(isinstance(g.mean,list)):
clusters[np.argmin([self.distance(g.mean,means[j]) for j in range(0,k)])].addG(g);
else:
clusters[np.argmin([self.distance([g.mean],means[j]) for j in range(0,k)])].addG(g);
#find the mean of each cluster
newMeans = [0]*k;
for i in range(0,k):
if(isinstance(self.Gs[0].mean,list)):
newMeans[i] = np.array([0]*len(self.Gs[0].mean));
for g in clusters[i].Gs:
newMeans[i] = np.add(newMeans[i],np.divide(g.mean,clusters[i].size));
if(np.array_equal(means,newMeans)):
converge = True;
count = count+1;
for i in range(0,len(newMeans)):
for j in range(0,len(newMeans[i])):
means[i][j] = newMeans[i][j];
#condense each cluster
for c in clusters:
c.condense(1);
#add each cluster back together
ans = GM();
for c in clusters:
ans.addGM(c);
ans.action = self.action;
#Make sure everything is positive semidefinite
#TODO: dont just remove them, fix them?
dels = [];
for g in ans.Gs:
if(det(np.matrix(g.var)) <= 0):
dels.append(g);
for rem in dels:
if(rem in ans.Gs):
ans.Gs.remove(rem);
ans.size -= 1
#return the resulting GMM
return ans;
def printClean(self,slices):
'''
Cleans lists in preparation for printing to plain text files
'''
slices = str(slices);
slices = slices.replace(']','');
slices = slices.replace(',','');
slices = slices.replace('[','');
return slices;
def printGMArrayToFile(self,GMArr,fileName):
'''
Prints an Array of GMs to a text file, in a way that can be read
        by the readGMArray4D function or similar functions.
Note: The only reason this exists is due to a phantom error using numpy load and save
        on one of our lab computers. Highly recommend just pickling these things.
'''
f = open(fileName,"w");
for i in range(0,len(GMArr)):
GMArr[i].printToFile(f);
f.close();
def printToFile(self,file):
'''
Prints a single Gaussian Mixture to a plain text file
Note: The only reason this exists is due to a phantom error using numpy load and save
        on one of our lab computers. Highly recommend just pickling these things.
'''
#first line is N, number of gaussians
#next N lines are, mean, variance, weight
file.write(str(self.size) + " " + str(self.action) + "\n");
for g in self.Gs:
m = self.printClean(g.mean);
var = self.printClean(g.var);
w = self.printClean(g.weight);
file.write(m + " " + var + " " + w + "\n");
def readGMArray4D(self,fileName):
'''
Extracts a 4 dimensional Gaussian Mixture from a text file
created by printGMArrayToFile function.
Note: The only reason this exists is due to a phantom error using numpy load and save
        on one of our lab computers. Highly recommend just pickling these things.
'''
file = open(fileName,"r");
lines = np.fromfile(fileName,sep = " ");
ans = []
count = 0;
countL = len(lines);
while(count < countL):
tmp = lines[count:];
num = int(tmp[0]);
act = int(tmp[1]);
count = count + 2;
cur = GM();
cur.action = act;
for i in range(0,num):
tmp = lines[count:]
count = count + 21;
mean = [float(tmp[0]),float(tmp[1]),float(tmp[2]),float(tmp[3])];
var = [[float(tmp[4]),float(tmp[5]),float(tmp[6]),float(tmp[7])],[float(tmp[8]),float(tmp[9]),float(tmp[10]),float(tmp[11])],[float(tmp[12]),float(tmp[13]),float(tmp[14]),float(tmp[15])],[float(tmp[16]),float(tmp[17]),float(tmp[18]),float(tmp[19])]];
weight = float(tmp[20]);
cur.addG(Gaussian(mean,var,weight));
ans += [cur];
return ans;
def scalarMultiply(self,s):
'''
Multiplies the weight of each mixand by scalar s
'''
for g in self.Gs:
g.weight = s*g.weight;
def GMProduct(self,b,cond = -1):
'''
Returns the product of two Gaussian Mixtures, which is also a Gaussian Mixture
If cond != -1, condenses the mixture to cond mixands before returning
'''
result = GM();
for g1 in self.Gs:
u1 = copy.deepcopy(np.matrix(g1.mean));
var1 = np.matrix(g1.var);
w1 = g1.weight;
for g2 in b.Gs:
u2 = copy.deepcopy(np.matrix(g2.mean));
var2 = np.matrix(g2.var);
w2 = g2.weight;
weight = w1*w2*mvn.pdf(u1.tolist()[0],u2.tolist()[0],var1+var2);
var = (var1.I + var2.I).I;
mean = var*(var1.I*np.transpose(u1) + var2.I*np.transpose(u2));
mean = np.transpose(mean).tolist()[0];
var = var.tolist();
result.addNewG(mean,var,weight);
if(cond != -1):
result.condense(cond);
return result;
def sample(self,num):
w = copy.deepcopy(self.getWeights());
suma = 0;
for i in range(0,len(w)):
suma+=w[i];
for i in range(0,len(w)):
w[i] = w[i]/suma;
means = self.getMeans();
var = self.getVars();
allSamps = [];
for count in range(0,num):
cut = np.random.choice(range(0,len(w)),p=w);
if(isinstance(means[0],int) or isinstance(means[0],float)):
samp = np.random.normal(means[cut],var[cut],1).tolist()[0];
else:
samp = np.random.multivariate_normal(means[cut],var[cut],1).tolist()[0];
allSamps.append(samp);
return allSamps;
def discretize2D(self,low = [0,0],high=[10,10], delta = 0.1):
#Added in version 1.3.1
#Inputs:
# low, lower bounds on x and y axes
# high, upper bounds on x and y axes
# delta, discretization constant, grid-cell length
#Output:
# A 2D numpy array with grid cells from low to high by delta
x, y = np.mgrid[low[0]:high[0]:delta, low[1]:high[1]:delta]
pos = np.dstack((x, y))
#self.clean();
# c = None;
c = np.zeros(shape=(pos.shape[0],pos.shape[1]))
for g in self.Gs:
try:
# print('THIS IS THE VALUE OF c is {}'.format(c))
# if(c == None):
# c = mvn.pdf(pos,g.mean,g.var)*g.weight;
# else:
c += mvn.pdf(pos,g.mean,g.var)*g.weight;
except:
g.display();
raise;
return c;
def condense(self, max_num_mixands=None):
'''
Runnalls Method for Gaussian Mixture Condensation.
Adapted from Nick Sweets gaussian_mixture.py
https://github.com/COHRINT/cops_and_robots/blob/dev/src/cops_and_robots/fusion/gaussian_mixture.py
Now valid for negative weights
If mixture contains all identical mixands at any point, it returns the mixture as is.
'''
if max_num_mixands is None:
max_num_mixands = self.max_num_mixands
#Check if any mixands are small enough to not matter
#specifically if they're weighted really really low
dels = [];
for g in self.Gs:
if(abs(g.weight) < 0.000001):
dels.append(g);
for rem in dels:
if(rem in self.Gs):
self.Gs.remove(rem);
self.size = self.size-1;
#Check if any mixands are identical
dels = [];
for i in range(0,self.size):
for j in range(0,self.size):
if(i==j):
continue;
g1 = self.Gs[i];
g2 = self.Gs[j];
if(g1.fullComp(g2) and g1 not in dels):
dels.append(g2);
g1.weight = g1.weight*2;
for rem in dels:
if(rem in self.Gs):
self.Gs.remove(rem);
self.size = self.size-1;
#Check if merging is useful
if self.size <= max_num_mixands:
return 0;
# Create lower-triangle of dissimilarity matrix B
#<>TODO: this is O(n ** 2) and very slow. Speed it up! parallelize?
B = np.zeros((self.size, self.size))
for i in range(self.size):
mix_i = (self.Gs[i].weight, self.Gs[i].mean, self.Gs[i].var)
for j in range(i):
if i == j:
continue
mix_j = (self.Gs[j].weight, self.Gs[j].mean, self.Gs[j].var)
B[i,j] = self.mixand_dissimilarity(mix_i, mix_j)
# Keep merging until we get the right number of mixands
deleted_mixands = []
toRemove = [];
while self.size > max_num_mixands:
# Find most similar mixands
try:
min_B = B[abs(B)>0].min()
except:
#self.display();
#raise;
return;
ind = np.where(B==min_B)
i, j = ind[0][0], ind[1][0]
# Get merged mixand
mix_i = (self.Gs[i].weight, self.Gs[i].mean, self.Gs[i].var)
mix_j = (self.Gs[j].weight, self.Gs[j].mean, self.Gs[j].var)
w_ij, mu_ij, P_ij = self.merge_mixands(mix_i, mix_j)
# Replace mixand i with merged mixand
ij = i
self.Gs[ij].weight = w_ij
self.Gs[ij].mean = mu_ij.tolist();
self.Gs[ij].var = P_ij.tolist();
# Fill mixand i's B values with new mixand's B values
mix_ij = (w_ij, mu_ij, P_ij)
deleted_mixands.append(j)
toRemove.append(self.Gs[j]);
#print(B.shape[0]);
for k in range(0,B.shape[0]):
if k == ij or k in deleted_mixands:
continue
# Only fill lower triangle
# print(self.size,k)
mix_k = (self.Gs[k].weight, self.Gs[k].mean, self.Gs[k].var)
if k < i:
B[ij,k] = self.mixand_dissimilarity(mix_k, mix_ij)
else:
B[k,ij] = self.mixand_dissimilarity(mix_k, mix_ij)
# Remove mixand j from B
B[j,:] = np.inf
B[:,j] = np.inf
self.size -= 1
# Delete removed mixands from parameter arrays
for rem in toRemove:
if(rem in self.Gs):
self.Gs.remove(rem);
#Make sure everything is positive semidefinite
#TODO: dont just remove them, fix them?
dels = [];
for g in self.Gs:
if(det(np.matrix(g.var)) <= 0):
dels.append(g);
for rem in dels:
if(rem in self.Gs):
self.Gs.remove(rem);
self.size -= 1
def mixand_dissimilarity(self,mix_i, mix_j):
"""Calculate KL descriminiation-based dissimilarity between mixands.
"""
# Get covariance of moment-preserving merge
w_i, mu_i, P_i = mix_i
w_j, mu_j, P_j = mix_j
_, _, P_ij = self.merge_mixands(mix_i, mix_j)
'''
#TODO: This is different
if(w_i < 0 and w_j< 0):
w_i = abs(w_i);
w_j = abs(w_j);
'''
if(P_ij.ndim == 1 or len(P_ij.tolist()[0]) == 1):
if(not isinstance(P_ij,(int,list,float))):
P_ij = P_ij.tolist()[0];
while(isinstance(P_ij,list)):
P_ij = P_ij[0];
if(not isinstance(P_i,(int,list,float))):
P_i = P_i.tolist()[0];
while(isinstance(P_i,list)):
P_i = P_i[0];
if(not isinstance(P_j,(int,list,float))):
P_j = P_j.tolist()[0];
while(isinstance(P_j,list)):
P_j = P_j[0];
logdet_P_ij = P_ij;
logdet_P_i = P_i;
logdet_P_j = P_j;
else:
# Use slogdet to prevent over/underflow
_, logdet_P_ij = np.linalg.slogdet(P_ij)
_, logdet_P_i = np.linalg.slogdet(P_i)
_, logdet_P_j = np.linalg.slogdet(P_j)
# <>TODO: check to see if anything's happening upstream
if np.isinf(logdet_P_ij):
logdet_P_ij = 0
if np.isinf(logdet_P_i):
logdet_P_i = 0
if np.isinf(logdet_P_j):
logdet_P_j = 0
#print(logdet_P_ij,logdet_P_j,logdet_P_i)
b = 0.5 * ((w_i + w_j) * logdet_P_ij - w_i * logdet_P_i - w_j * logdet_P_j)
return b
def merge_mixands(self,mix_i, mix_j):
"""Use moment-preserving merge (0th, 1st, 2nd moments) to combine mixands.
"""
# Unpack mixands
w_i, mu_i, P_i = mix_i
w_j, mu_j, P_j = mix_j
mu_i = np.array(mu_i);
mu_j = np.array(mu_j);
P_j = np.matrix(P_j);
P_i = np.matrix(P_i);
# Merge weights
w_ij = w_i + w_j
w_i_ij = w_i / (w_i + w_j)
w_j_ij = w_j / (w_i + w_j)
# Merge means
mu_ij = w_i_ij * mu_i + w_j_ij * mu_j
P_j = np.matrix(P_j);
P_i = np.matrix(P_i);
# Merge covariances
P_ij = w_i_ij * P_i + w_j_ij * P_j + \
w_i_ij * w_j_ij * np.outer(self.subMu(mu_i,mu_j), self.subMu(mu_i,mu_j))
return w_ij, mu_ij, P_ij
def subMu(self,a,b):
if(isinstance(a,np.ndarray)):
return a-b;
if(isinstance(a,(float,int))):
return a-b;
else:
c = [0]*len(a);
for i in range(0,len(a)):
c[i] = a[i]-b[i];
return c;
def TestGMProduct():
a = GM([1,8,3],[1,1,1],[1,1,1]);
b = GM([4,2,6],[1,1,1],[1,1,1]);
c = a.GMProduct(b);
low = 0;
high = 10;
num = 1000;
x = np.linspace(low,high,num);
aPlot = a.plot(low=low,high = high,num=num,vis = False);
bPlot = b.plot(low=low,high = high,num=num,vis=False);
cPlot = c.plot(low=low,high = high,num=num,vis=False);
plt.plot(x,aPlot);
plt.plot(x,bPlot);
plt.plot(x,cPlot);
plt.title("Gaussian Mixture Product Test");
plt.legend(['First Mixture','Second Mixture','Product']);
plt.show();
def Test2DGMProduct():
g1 = GM([2,1],[[1,0],[0,2]],1);
g2 = GM([1,5],[[4,0],[0,1]],1);
mix = g2.GMProduct(g1,cond=-1);
[x1,y1,c1] = g1.plot2D(vis = False);
[x2,y2,c2] = g2.plot2D(vis = False);
[x3,y3,c3] = mix.plot2D(vis = False);
fig,axarr = plt.subplots(3,sharex = True);
axarr[0].contourf(x1,y1,c1,cmap = 'viridis');
axarr[0].set_title('First Mixture');
axarr[1].contourf(x2,y2,c2,cmap = 'viridis');
axarr[0].set_title('Second Mixture');
axarr[2].contourf(x3,y3,c3,cmap = 'viridis');
axarr[0].set_title('Product Mixture');
plt.suptitle('Testing the product of 2D Gaussians');
plt.show();
def Test4DGMProduct():
#Courtesy of Mike Ouimet
m1 = [[0, 0, 0, 0], [1,1,1,1]] #means
s1 = [np.eye(4), 2*np.eye(4)] #variances
m2 = [[0, 1, -1, 0], [1,0,-1,1]]
s2 = [4*np.eye(4), 1*np.eye(4)]
g1 = GM(u=m1, s=s1, w=[1,1])
g2 = GM(u=m2, s=s2, w=[1,1])
mix = g2.GMProduct(g1,cond = -1)
print("The resulting mixture:");
mix.display();
fig,ax = plt.subplots(2,2);
[x1,y1,c1] = mix.slice2DFrom4D(vis=False,dims=[0,2]);
ax[0,0].contourf(x1,y1,c1,cmap = 'viridis');
ax[0,0].set_title('X1 by X3');
[x2,y2,c2] = mix.slice2DFrom4D(vis=False,dims=[0,3]);
ax[0,1].contourf(x2,y2,c2,cmap = 'viridis');
ax[0,1].set_title('X1 by X4');
[x3,y3,c3] = mix.slice2DFrom4D(vis=False,dims=[1,2]);
ax[1,0].contourf(x3,y3,c3,cmap = 'viridis');
ax[1,0].set_title('X2 by X3');
[x4,y4,c4] = mix.slice2DFrom4D(vis=False,dims=[1,3]);
ax[1,1].contourf(x4,y4,c4,cmap = 'viridis');
ax[1,1].set_title('X2 by X4');
fig.suptitle("Slices along Various Axis in 2D from 4D");
plt.show();
def TestTextFilePrinting():
prior = GM([0,-2,1,2],[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],1);
prior.addG(Gaussian([0,-2,1,2],[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],1))
pri = GM([0,-2,1,2],[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],1);
pri.addG(Gaussian([0,-2,1,2],[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],1))
file = './loadTest.txt';
prior.printGMArrayToFile([prior,pri],file);
tmp = GM();
post = tmp.readGMArray4D(file);
post[0].display();
def TestCondense():
test = GM();
for i in range(0,100):
test.addNewG(random()*10,random()*2,random()*5);
testKmeans = copy.deepcopy(test);
low = 0;
high = 10;
num = 1000;
x = np.linspace(low,high,num);
[x0,testPlot] = test.plot(low=low,high = high,num=num,vis = False);
test.condense(10);
[x1,testCondensePlot] = test.plot(low=low,high = high,num=num,vis = False);
testKmeans = testKmeans.kmeansCondensationN(k=10,lowInit = low, highInit = high);
[x2,testKmeansPlot] = testKmeans.plot(low=low,high = high,num=num,vis = False);
plt.plot(x0,testPlot);
plt.plot(x1,testCondensePlot);
plt.plot(x2,testKmeansPlot);
plt.legend(['Original Mixture','Condensed Mixture (Runnalls)','Condensed Mixture (K-means Runnalls)']);
plt.title('Condensation Test: 100 to 10 mixands')
plt.show();
def TestCondense2D():
test = GM();
for i in range(0,100):
test.addG(Gaussian([random()*10,random()*10],[[random()*2,0],[0,random()*2]],random()*5));
testKmeans = copy.deepcopy(test);
low = [0,0];
high = [10,10];
[x1,y1,c1] = test.plot2D(vis=False);
test.condense(10);
[x2,y2,c2] = test.plot2D(vis = False);
testKmeans = testKmeans.kmeansCondensationN(k = 10, lowInit = low, highInit = high);
[x3,y3,c3] = testKmeans.plot2D(vis=False);
fig,axarr = plt.subplots(3,sharex = True);
axarr[0].contourf(x1,y1,c1,cmap = 'viridis');
axarr[0].set_title('Original Mixture');
axarr[1].contourf(x2,y2,c2,cmap = 'viridis');
axarr[1].set_title('Runnalls Method Condensed Mixture');
axarr[2].contourf(x3,y3,c3,cmap = 'viridis');
axarr[2].set_title('K-means + Runnalls Method Condensed Mixture');
plt.suptitle('2D Condensation Test: 100 to 10 mixands');
plt.show();
def TestComparison():
test1 = GM();
test1.addG(Gaussian([0,1],[[1,0],[0,1]],1));
test1.addG(Gaussian([1,2],[[1,0],[0,1]],1));
test2 = GM();
test2.addG(Gaussian([0,1],[[1,0],[0,1]],1));
test2.addG(Gaussian([1,2],[[1,0],[0,1]],1));
test3 = GM();
test3.addG(Gaussian([0,5],[[1,0],[0,1]],1));
test3.addG(Gaussian([1,2],[[1,0],[0,1]],1));
print('Test1 and Test2: ' + str(test1.fullComp(test2)));
print('Test1 and Test3: ' + str(test1.fullComp(test3)));
def TestSample():
test1 = GM();
test1.addG(Gaussian(0,1,.33));
test1.addG(Gaussian(10,1,.33));
test1.addG(Gaussian(-5,1,.33));
samps = test1.sample(10000);
plt.hist(samps,normed=1,bins = 100);
plt.show();
def TestSample2D():
test1 = GM();
test1.addG(Gaussian([0,0],[[1,0],[0,1]],.33));
test1.addG(Gaussian([3,3],[[1,0],[0,1]],.33));
test1.addG(Gaussian([-2,-2],[[1,0],[0,1]],.33));
samps = test1.sample(10000);
sampsx = [samps[i][0] for i in range(0,len(samps))];
sampsy = [samps[i][1] for i in range(0,len(samps))];
plt.hist2d(sampsx,sampsy,normed = 1,bins=100);
plt.show();
def TestDiscretization():
test1 = GM();
test1.addG(Gaussian([0,0],[[1,0],[0,1]],.33));
test1.addG(Gaussian([3,3],[[1,0],[0,1]],.33));
test1.addG(Gaussian([-2,-2],[[1,0],[0,1]],.33));
grid = test1.discretize2D(low=[-15,-15],high=[15,15],delta=0.01);
print(grid.shape);
plt.contourf(grid);
plt.show();
def TestRandomMixtureCreation():
dims=2;
size = 10;
low = 1;
high = 2;
per = 3;
gm = GM();
gm.makeRandomMixture(size,dims,per,low,high);
gm.plot2D();
if __name__ == "__main__":
#TestGMProduct();
#Test2DGMProduct();
#Test4DGMProduct();
#TestTextFilePrinting();
#TestCondense();
#TestCondense2D();
#TestComparison();
#TestSample();
#TestSample2D();
#TestDiscretization();
TestRandomMixtureCreation();
|
[
"matthew.luebbers@colorado.edu"
] |
matthew.luebbers@colorado.edu
|
2b077ef9b1f4711fd171d4573c7c3bee70cb0122
|
96e03ae90e906e8dea62dd495b2d10679c4a4489
|
/YelpCamp/manage.py
|
86cb701f6f0ba88b2c1b743b591b7b64dc237bc9
|
[] |
no_license
|
ryanMiranda98/YelpCamp
|
0cbb9776aa6dd28849e1be776b278adcef364d98
|
f369f41f67a0fbb00fa2d831e1df8ffd7c0530c8
|
refs/heads/master
| 2021-02-07T16:31:55.982852
| 2020-03-03T11:25:17
| 2020-03-03T11:25:17
| 244,051,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'YelpCamp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"ryanjude.miranda@gmail.com"
] |
ryanjude.miranda@gmail.com
|
6b4cdd9254180d22591588ada9e0d4b49941a84e
|
acff427a36d6340486ff747ae9e52f05a4b027f2
|
/main/programming/language/perl/perl-JSON/actions.py
|
406126c3b23c3ef8a36f701469832a2f10bc1367
|
[] |
no_license
|
jeremie1112/pisilinux
|
8f5a03212de0c1b2453132dd879d8c1556bb4ff7
|
d0643b537d78208174a4eeb5effeb9cb63c2ef4f
|
refs/heads/master
| 2020-03-31T10:12:21.253540
| 2018-10-08T18:53:50
| 2018-10-08T18:53:50
| 152,126,584
| 2
| 1
| null | 2018-10-08T18:24:17
| 2018-10-08T18:24:17
| null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import perlmodules
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "%s-%s" % (get.srcNAME()[5:], get.srcVERSION())
def setup():
perlmodules.configure()
def build():
perlmodules.make()
def check():
perlmodules.make("test")
def install():
perlmodules.install()
pisitools.dodoc("Changes", "README")
|
[
"erkanisik@yahoo.com"
] |
erkanisik@yahoo.com
|
b9c2f137115eb5978937bc2aed8add1ebb519386
|
e10ff4ab332f88418c8d74c1b9d15130af7662ec
|
/loginadmin.py
|
c3f4c8801ccf08543af8fed3c4f2b5eb76e5b972
|
[] |
no_license
|
Skull1991/Nitesh-Employee
|
eecb2f41b1888208fd3c48f2541f2cb4f8f6865d
|
1572be5551146abbe441ed77f5bdc45a3d479330
|
refs/heads/master
| 2023-07-14T16:20:46.025271
| 2021-08-22T11:20:21
| 2021-08-22T11:20:21
| 398,762,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
from tkinter import *
from tkinter import messagebox
from PIL import Image,ImageTk
import os
root=Tk()
root.geometry("1366x768+60+10")
root.resizable(0, 0)
global e1
global e2
def ok():
uname=e1.get()
password=e2.get()
if(uname==""and password==""):
messagebox.showinfo("","Blank Not Allowed")
elif(uname=="admin"and password=="admin"):
messagebox.showinfo("","Login Success")
root.withdraw()
os.system("admin.py")
else:
messagebox.showinfo("","Incorrect")
root.title("Login")
myimage=ImageTk.PhotoImage(Image.open('./images/adminlogin.png'))
Label(image=myimage).pack()
#e1: Entry widget for the username
e1=Entry(root,width=40,border=0,font=('Consolas',13))
e1.place(x=510,y=210)
#e2: Entry widget for the password
e2=Entry(root,width=40,border=0,show='*',font=('Consolas',13))
e2.place(x=510,y=300)
Button(root,text='LOGIN',font=('Consolas',20), padx=20,pady=10,cursor='hand2',border=0,bg="#6dcff6",
activebackground="#6dcff6",command=ok).place(x=625,y=515)
root.mainloop()
|
[
"skull.shanto10@gmail.com"
] |
skull.shanto10@gmail.com
|
6d4853e5820ffc6d3d4ed4902424ee265288cdae
|
ca3d5eeb740b372095d9169591a572d32efd672e
|
/script/utils.py
|
e47222fe96eab2ec2399f73a75f12cfe4ae477f5
|
[] |
no_license
|
JianqiaoAirport/SARN_CTR
|
514a23b49414cbe049c503adaa3cc7ca143cff38
|
b3467be05eca4c2374c3f5341a61ed9273babd0a
|
refs/heads/master
| 2022-05-30T14:45:33.382155
| 2020-05-01T06:10:58
| 2020-05-01T06:10:58
| 260,198,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,694
|
py
|
# -*- coding: UTF-8 -*-
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import *
from tensorflow.python.ops.rnn_cell_impl import _Linear
from tensorflow import keras
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
from keras import backend as K
class QAAttGRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(QAAttGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._gate_linear = None
self._candidate_linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
new_h = (1. - att_score) * state + att_score * c
return new_h, new_h
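    # Note on the update above: QAAttGRUCell uses the attention score a itself
    # as the update gate, i.e. new_h = (1 - a) * state + a * c (AGRU-style
    # gating): a score of 1 fully adopts the candidate c, 0 keeps the state.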
class VecAttGRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(VecAttGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._gate_linear = None
self._candidate_linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, new_h
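# Note on VecAttGRUCell.call above: the learned update gate is scaled by the
# attention score, u' = (1 - a) * u, so new_h = u' * state + (1 - u') * c
# (AUGRU-style gating): a high score shrinks u' and pushes the state toward c.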
def prelu(_x, scope=''):
"""parametric ReLU activation"""
with tf.variable_scope(name_or_scope=scope, default_name="prelu",reuse=tf.AUTO_REUSE):
_alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
def calc_auc(raw_arr):
"""Summary
Args:
raw_arr (TYPE): Description
Returns:
TYPE: Description
"""
    # Sort in descending order of predicted score
arr = sorted(raw_arr, key=lambda d:d[0], reverse=True)
pos, neg = 0., 0.
    # Count the numbers of positive and negative samples
for record in arr:
if record[1] == 1.:
pos += 1
else:
neg += 1
fp, tp = 0., 0.
xy_arr = []
    # Equivalent to sweeping the threshold from high to low, recording tp and fp at each step
for record in arr:
if record[1] == 1.:
tp += 1
else:
fp += 1
xy_arr.append([fp/neg, tp/pos])
    # Accumulate the area using trapezoids (half of each rectangle's area).
auc = 0.
prev_x = 0.
prev_y = 0.
for x, y in xy_arr:
if x != prev_x:
auc += ((x - prev_x) * (y + prev_y) / 2.)
prev_x = x
prev_y = y
return auc
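# Worked example for calc_auc (inputs are illustrative [score, label] pairs):
# calc_auc([[0.9, 1.], [0.8, 0.], [0.7, 1.], [0.3, 0.]]) visits the ROC points
# (0, 0.5), (0.5, 0.5), (0.5, 1.0), (1.0, 1.0) and returns 0.25 + 0.5 = 0.75.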
def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
input_size = query.get_shape().as_list()[-1]
# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
#output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
output = facts * tf.expand_dims(alphas, -1)
output = tf.reshape(output, tf.shape(facts))
# output = output / (facts.get_shape().as_list()[-1] ** 0.5)
if not return_alphas:
return output
else:
return output, alphas
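# Minimal call sketch for attention() (shapes inferred from the code above;
# the variable names are illustrative):
#   query: [B, D_q], facts: [B, T, D], mask: [B, T] with ones on valid steps
#   weighted = attention(query, facts, ATTENTION_SIZE, mask)
# With the default mode='LIST' this returns the per-timestep weighted facts
# with shape [B, T, D].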
def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
print ("querry_size mismatch")
query = tf.concat(values = [
query,
query,
], axis=1)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
return output
def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag, reuse=tf.AUTO_REUSE)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag, reuse=tf.AUTO_REUSE)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag, reuse=tf.AUTO_REUSE)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag, reuse=tf.AUTO_REUSE)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
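# Output shape note for din_fcn_attention: mode='SUM' reduces over time via
# tf.matmul(scores, facts) and yields [B, 1, H]; any other mode returns the
# per-step weighted facts with shape [B, T, H].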
def clu_tar_din_fcn_attention(query, facts, attention_size, mask, stag='clu_tar', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
print("facts is tuple")
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
print("len(facts) == 2")
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameterstf.layers.dense(bn1, 50, activation=None, name='f1')
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query, scope=stag+"prelu")
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
# paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
# if not forCnn:
# scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
tf.summary.histogram('clu_tar scores', scores)
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def din_multi_fcn_attention(query, facts, attention_size, mask, stag='clu_tar', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
print("facts is tuple")
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
# print("len(facts) == 2")
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
    # If the query is all zeros, convert the whole mask to zeros
query_sum = tf.reduce_sum(query, 1)
query_sum = tf.expand_dims(query_sum, 1)
query_sums = tf.tile(query_sum, [1, tf.shape(mask)[1]])
temp_mask = tf.multiply(tf.cast(mask, tf.float32), query_sums)
one = tf.ones_like(temp_mask)
zero = tf.zeros_like(temp_mask)
mask = tf.where(tf.abs(temp_mask) > 0, x=one, y=zero)
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
query = tf.layers.dense(query, facts_size, activation=None, use_bias=False, name='f1' + stag, reuse=tf.AUTO_REUSE)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 10, activation=tf.nn.sigmoid, use_bias=False, name='f1_att' + stag, reuse=tf.AUTO_REUSE)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 5, activation=tf.nn.sigmoid, use_bias=False, name='f2_att' + stag, reuse=tf.AUTO_REUSE)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, use_bias=False, name='f3_att' + stag, reuse=tf.AUTO_REUSE)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# scores = tf.reduce_sum(din_all, 2)
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.zeros_like(scores) + 1e-8
if not forCnn:
# scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# scores = tf.where(mask, scores, paddings)
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
# if softmax_stag:
# scores = tf.nn.softmax(scores) # [B, 1, T]
# tf.summary.histogram('clu_tar scores', scores)
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
|
[
"rommeldhy@163.com"
] |
rommeldhy@163.com
|
b6974055275f4d4c538d2f027d95efb2b3997546
|
0de9850635f48fd7851340c19e78f5bcebfd917b
|
/lampara.py
|
6f954c3f98d74f47fb88ff4a303e06d8bc6a24e9
|
[] |
no_license
|
tzalejo/pyhton-base
|
133f94072f8ad9da7f9311e7434a3d5fa08269bf
|
a463e05b60b79e4c0e2d1c38932a86696fce826d
|
refs/heads/main
| 2023-01-14T14:30:08.420197
| 2020-11-30T04:58:22
| 2020-11-30T04:58:22
| 317,115,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
class Lampara:
def __init__(self):
pass
|
[
"tzalejo@gmail.com"
] |
tzalejo@gmail.com
|
a73e13191382599751df1a91276c5ea0b6aaffb2
|
0bf59691e4511ebada2efcdb8d0dc8a506c947e5
|
/binary_inverse.py
|
6e2c67697704d8bc4cdbab96eb56cf5b02ef547a
|
[] |
no_license
|
slayer96/binary_inverse
|
c48130682356ad74ec009bf434645c87eea76ad6
|
2ad4e54e00dd4b95f3aa06be4903a86adaad86ad
|
refs/heads/master
| 2021-01-10T04:13:18.133081
| 2016-02-01T23:34:27
| 2016-02-01T23:34:27
| 50,882,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
#binary inverse
#python3.4
def O(n):
    """Negate a base -2 (negabinary) number given as a digit list, least-significant digit first."""
if len(n) == 1 and n[0] == 0:
return [0]
#n.reverse()
dec = 0
index = 0
for i in n:
tmp = i * ((-2) ** index)
index += 1
dec += tmp
#print(dec)
dec *= -1
result = []
while dec:
tmp = dec % (-2)
dec //= (-2)
if tmp < 0:
tmp += 2
dec += 1
result.append(tmp)
#result.reverse()
return result
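# Worked example (digits are least-significant first, base -2):
# [1, 0, 1, 1, 0, 1] encodes 1 + 4 - 8 - 32 = -35; negating gives 35, which
# re-encodes as [1, 1, 1, 0, 0, 1, 1] since 1 - 2 + 4 - 32 + 64 = 35.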
if __name__ == '__main__':
print(O([1, 0, 1, 1, 0, 1]))
|
[
"myhailo.mykytyn.96@mail.ru"
] |
myhailo.mykytyn.96@mail.ru
|
33fc505b3949ed8cfde4c4eb3218e19ba7f5cbfd
|
81277d47f0928be7656500d85f4201f963bb4746
|
/swapping-nodes-in-a-linked-list/swapping-nodes-in-a-linked-list.py
|
6b60f459b5b50d8bf0dd2c3cd2224b4f6c7b3f7c
|
[] |
no_license
|
siva4646/LeetCode_Python
|
db1d535a174f38b616cddfeeac412c0d02afa372
|
52bf12095996a9137b1ea213ac43e1fe07806956
|
refs/heads/main
| 2023-04-13T22:39:23.474034
| 2021-04-23T02:12:19
| 2021-04-23T02:12:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def swapNodes(self, head, k):
slow = fast = head
for _ in range(k - 1):
fast = fast.next
first_node_swap = fast
fast = fast.next
while fast != None:
slow = slow.next
fast = fast.next
slow.val, first_node_swap.val = first_node_swap.val, slow.val
return head
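# Walkthrough of swapNodes (illustrative list): head = 1->2->3->4->5, k = 2.
# fast stops on node 2 (first_node_swap); advancing slow and fast together
# until fast is None leaves slow on node 4 (the 2nd from the end), so the
# values 2 and 4 are swapped and the list becomes 1->4->3->2->5.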
|
[
"sachindevatarhs777@gmail.com"
] |
sachindevatarhs777@gmail.com
|
2e324c7fe29b14a6933e5d4c9b493d4981b2b948
|
68753b38ec8e4eaaf95eae4202a9a22391080fa3
|
/learning_templates/basic_app/templatetags/__init__.py
|
134284b9dc631b704885ca912289581dc1e05bd7
|
[] |
no_license
|
siddharth288/learning-templates-django
|
9a35e0b8d102bdb71acfd23cbacd94130b558784
|
84e082c9ea56fe4a5c81cbd464aa155c40433acd
|
refs/heads/main
| 2023-06-20T08:51:33.555131
| 2021-07-21T12:33:11
| 2021-07-21T12:33:11
| 388,111,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
# Tells Python to treat this directory as a package so its template tags can be loaded
|
[
"siddharth@siddharth.com"
] |
siddharth@siddharth.com
|
f89bf721f31caee152bc2e8884cf341032766206
|
92aad61825bea12ccdfd8d52487b43430d4e5cf3
|
/CS61002 Labs/lab08.py
|
453dbc1351867b16f027eea44f4d91dc26a8ae92
|
[] |
no_license
|
radam0/Python
|
ec9324a18579a2d4be1bde7011182842dba79abf
|
bacefceea76f0f4367b1ef18707a65735b5dfb73
|
refs/heads/master
| 2022-04-21T23:54:02.173575
| 2020-04-22T19:34:14
| 2020-04-22T19:34:14
| 115,464,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,754
|
py
|
#Nikhil Vemula
#March 5,2016
#CS61002
## Matching DNA Program
# Function for padding the given strings with hyphens to equal length
def getcurrentstring(firstString,secString):
firstlen = len(firstString)
seclen = len(secString)
if firstlen>seclen:
secString = secString.ljust(firstlen,'-')
if seclen>firstlen:
firstString = firstString.ljust(seclen,'-')
return firstString,secString
# Function for getting the hyphen (indel) indexes of a string
def getindexces(string):
indexes = []
for i in range(len(string)):
if string[i] == '-':
indexes.append(i+1)
return indexes
# Main function that drives the entire program
def mainfunction():
firstString = raw_input('String 1: ').lower()
secString = raw_input('String 2: ').lower()
if firstString=='' or secString=='':
print 'Given inputs are not valid, Please try again.'
return
quit_sel = 0
while not quit_sel:
selection = raw_input('\nWhat do you want to do:\n\ta (Add an Indel)\n\td (Delete an Indel)\n\ts (Score)\n\tq (Quit) : ')
if selection=='s':
firstString,secString = getcurrentstring(firstString,secString)
match = 0
misMatch = 0
Str1 = ''
Str2 = ''
for i in range(len(firstString)):
if firstString[i]=='-' or secString[i]=='-':
misMatch = misMatch+1
Str1 = Str1+firstString[i].upper()
Str2 = Str2+secString[i].upper()
else:
if firstString[i]==secString[i]:
match = match+1
Str1 = Str1+firstString[i]
Str2 = Str2+secString[i]
else:
misMatch = misMatch+1
Str1 = Str1+firstString[i].upper()
Str2 = Str2+secString[i].upper()
print '\nMatches: %s\tMismatches: %s'%(match,misMatch)
print 'Str1: %s'%(Str1)
print 'Str2: %s'%(Str2)
elif selection=='a':
stringsel = raw_input('Which string to change (1 or 2): ')
if stringsel!='1' and stringsel!='2':
print 'Given input is wrong, Please try again.'
else:
toaddString = [firstString,secString][int(stringsel)-1]
indexpoint = raw_input('At what index do you wish to place Indel (1 to %s): '%(len(toaddString)))
try:
indexpoint = int(indexpoint)
except:
indexpoint= 0
if indexpoint<1 or indexpoint>(len(toaddString)):
print 'Given input is wrong, Please try again.'
else:
hashlist = list(toaddString)
hashlist.insert(indexpoint-1,'-')
toaddString = ''.join(hashlist)
if stringsel=='1':
firstString,secString = getcurrentstring(toaddString,secString)
elif stringsel=='2':
firstString,secString = getcurrentstring(firstString,toaddString)
elif selection == 'd':
stringsel = raw_input('Which string to change (1 or 2): ')
if stringsel!='1' and stringsel!='2':
print 'Given input is wrong, Please try again.'
else:
toaddString = [firstString,secString][int(stringsel)-1]
indexes = getindexces(toaddString)
if indexes==[]:
print 'No Indels present in the selected string. Please try another option.'
else:
indexpoint = raw_input('At what index do you wish to delete Indel (%s): '%(', '.join(map(str, indexes))))
try:
indexpoint = int(indexpoint)
except:
indexpoint= 0
if indexpoint<1 or indexpoint not in indexes:
print 'Given input is wrong, Please try again.'
else:
hashlist = list(toaddString)
del hashlist[indexpoint-1]
toaddString = ''.join(hashlist)
if stringsel=='1':
firstString,secString = getcurrentstring(toaddString,secString)
elif stringsel=='2':
firstString,secString = getcurrentstring(firstString,toaddString)
elif selection == 'q':
quit_sel = 1
else:
print 'Given input is wrong, Please try again.'
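# Example session sketch (the inputs after the prompts are illustrative):
#   String 1: gatc
#   String 2: gtc
# Choosing 's' pads string 2 to 'gtc-' and scores the pair as 1 match and
# 3 mismatches, printing Str1: gATC and Str2: gTC- (mismatches uppercased).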
mainfunction()
|
[
"vemulanikhil23@gmail.com"
] |
vemulanikhil23@gmail.com
|
d42c9a9a74785dff6bb67b8913af62a8e28c8a39
|
ab8fac28822d61aec0db5391d6608d84e7da4b50
|
/beyond Python V26/Modules/beyond/Reaper/Settings.py
|
f7d947af822eff92ae328bf1daedcb552790ab95
|
[] |
no_license
|
kadirahiska/Reaper
|
0a054e1b905718541c3f9f38317a3268bf5d8ef5
|
b43599b8ffffc26e31b52c33e5a25183c1e1bb48
|
refs/heads/master
| 2023-03-17T03:12:12.147940
| 2014-12-23T10:42:18
| 2014-12-23T10:42:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,848
|
py
|
# Step 1 - RemoteControl.py Action and its Cmd ID
# =======================================================================
Reaper_RemoteControl_CmdID = 55310 # Will this change?
# From Reaper's "Actions/Show Action List…" press "ReaScript: New/load…"
# Find and select "...\Modules\beyond\Reaper\RemoteControl.py"
# Back on the Actions list, you will see "Custom: RemoteControl.py"
# Right click on the Actions list and check "Show action IDs"
# Find the "Cmd ID" column for "Custom: RemoteControl.py"
# Enter that Cmd ID number above.
#
# NOTE that this number will shift and change when a previously defined
# Action is deleted or a Reaper extension like SWS is un/installed.
#
# In that case, look up the changed Cmd ID from Actions and update it above.
#
# In the future, when Reaper's OSC supports the stable "Custom ID",
# this will no longer be an issue. In the meantime, try to set up
# RemoteControl.py early on, so that any Actions you may add or delete
# afterwards will not change its Cmd ID.
# Step 2 - Reaper's OSC and Addresses
# =======================================================================
Reaper_OSC_Address = ("localhost", 8000)
External_Program_Address = ("localhost", 8001)
# From Reaper's "Options/Preferences" select "Control Surfaces" page.
#
# If the "Control Surfaces" has an item beginning with "OSC:", you have
# a preexisting OSC setup, see below.
#
# Press "Add" and select "OSC (Open Sound Control)".
#
# Make sure the "Pattern config:" is set to Default.
#
# Check and Activate "Receive on port:"
#
# The default Port of 8000 and the "localhost" addresses here are fine
# for connecting to Reaper locally on the same computer only. If you need
# to connect to Reaper across a network, change these addresses to match
# Reaper and where External Programs will be running.
#
#
# Preexisting OSC Setup:
# ======================
#
# If your "Pattern config:" is not Default, make sure you have the following
# line in your Pattern config file:
#
# ACTION i/action t/action/@
#
# Or, you may setup another Default OSC configuration as above on
# another Port and enter that Port above.
# Step 3 - Python Executable
# =======================================================================
Python = "/Library/Frameworks/Python.framework/Versions/3.4/Resources/Python.app/Contents/MacOS/Python"
# Enter the path of your preferred Python executable
#
# On Windows, example:
# Python = r"C:\Python34\pythonw.exe"
# Keep the r" to preserve the \'s
#
# On OSX, example:
# Python = "/Library/Frameworks/Python.framework/Versions/3.4/Resources/Python.app/Contents/MacOS/Python"
#
# This will allow you to launch External beyond.Reaper programs as
# Reaper Actions bound to keyboard shortcuts, menus and toolbars.
|
[
"sizeoverload@gmail.com"
] |
sizeoverload@gmail.com
|
57267d1bdde9f9c17bb0ad52191acc03c69df048
|
29d7af6c713388a43d86e8cfe74e037fc0b6b49d
|
/marketing/mixins.py
|
867cc96549925cf6f2423e49d583fabc0123eea4
|
[] |
no_license
|
austin89213/Ecommerce_Website
|
e84757529a1a30108d7fc5d603b1e95b1e209b19
|
4e817b78504ffdc824a666eb1b7c85c2d7e35a13
|
refs/heads/master
| 2023-03-25T07:18:19.242432
| 2021-03-23T17:20:22
| 2021-03-23T17:20:22
| 290,863,350
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
class CsrfExemptMixin(object):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(CsrfExemptMixin, self).dispatch(request, *args, **kwargs)
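# Hypothetical usage sketch (the view and import names are illustrative only):
# from django.views.generic import View
# from django.http import JsonResponse
#
# class WebhookView(CsrfExemptMixin, View):
#     def post(self, request, *args, **kwargs):
#         return JsonResponse({"received": True})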
|
[
"austin89213@gmail.com"
] |
austin89213@gmail.com
|
9eeb70bab827b9df8c2266cdf85a2546586a3caf
|
abee6a09225face843aaa5960460aca3d0c20ff6
|
/sorting_animate.py
|
24b4b93ad1f37755cc564df49ca9d9a4427e0b09
|
[] |
no_license
|
ankit-kumar-dwivedi/SortingwithTurtle
|
ab7596065149be4f64825e4fa53fd82bf1e70d51
|
120d16d16f26c41164a3ba13290b4b6825ef1a73
|
refs/heads/master
| 2020-03-25T07:20:21.671603
| 2018-08-04T19:02:20
| 2018-08-04T19:02:20
| 143,554,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,835
|
py
|
"""Just sorting algorithms"""
from turtle import *
import random
class Block(Turtle):
def __init__(self, size):
self.size = size
Turtle.__init__(self, shape="square", visible=False)
self.pu()
self.shapesize(size * 1.5, 1.5, 2) # square-->rectangle
self.fillcolor("black")
self.st()
def glow(self):
self.fillcolor("red")
def unglow(self):
self.fillcolor("black")
def __repr__(self):
return "Block size: {0}".format(self.size)
class Shelf(list):
def __init__(self, y):
"create a shelf. y is y-position of first block"
self.y = y
self.x = -150
def push(self, d):
width, _, _ = d.shapesize()
# align blocks by the bottom edge
y_offset = width / 2 * 20
d.sety(self.y + y_offset)
d.setx(self.x + 34 * len(self))
self.append(d)
def _close_gap_from_i(self, i):
for b in self[i:]:
xpos, _ = b.pos()
b.setx(xpos - 34)
def _open_gap_from_i(self, i):
for b in self[i:]:
xpos, _ = b.pos()
b.setx(xpos + 34)
def pop(self, key):
b = list.pop(self, key)
b.glow()
b.sety(200)
self._close_gap_from_i(key)
return b
def insert(self, key, b):
self._open_gap_from_i(key)
list.insert(self, key, b)
b.setx(self.x + 34 * key)
width, _, _ = b.shapesize()
# align blocks by the bottom edge
y_offset = width / 2 * 20
b.sety(self.y + y_offset)
b.unglow()
def isort(shelf):
length = len(shelf)
for i in range(1, length):
hole = i
while hole > 0 and shelf[i].size < shelf[hole - 1].size:
hole = hole - 1
shelf.insert(hole, shelf.pop(i))
return
def ssort(shelf):
length = len(shelf)
for j in range(0, length - 1):
imin = j
for i in range(j + 1, length):
if shelf[i].size < shelf[imin].size:
imin = i
if imin != j:
shelf.insert(j, shelf.pop(imin))
def partition(shelf, left, right, pivot_index):
pivot = shelf[pivot_index]
shelf.insert(right, shelf.pop(pivot_index))
store_index = left
for i in range(left, right): # range is non-inclusive of ending value
if shelf[i].size < pivot.size:
shelf.insert(store_index, shelf.pop(i))
store_index = store_index + 1
shelf.insert(store_index, shelf.pop(right)) # move pivot to correct position
return store_index
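# Illustrative trace of partition: block sizes [3, 1, 2] with left=0, right=2,
# pivot_index=0 (pivot size 3). The pivot is first moved to the right end,
# each smaller block is shifted left in turn, and the pivot is re-inserted
# at store_index = 2, leaving the shelf ordered [1, 2, 3].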
def qsort(shelf, left, right):
if left < right:
pivot_index = left
pivot_new_index = partition(shelf, left, right, pivot_index)
qsort(shelf, left, pivot_new_index - 1)
qsort(shelf, pivot_new_index + 1, right)
def randomize():
disable_keys()
clear()
target = list(range(10))
random.shuffle(target)
for i, t in enumerate(target):
for j in range(i, len(s)):
if s[j].size == t + 1:
s.insert(i, s.pop(j))
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
def show_text(text, line=0):
line = 20 * line
goto(0,-250 - line)
write(text, align="center", font=("Courier", 16, "bold"))
def start_ssort():
disable_keys()
clear()
show_text("Selection Sort")
ssort(s)
clear()
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
def start_isort():
disable_keys()
clear()
show_text("Insertion Sort")
isort(s)
clear()
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
def start_qsort():
disable_keys()
clear()
show_text("Quicksort")
qsort(s, 0, len(s) - 1)
clear()
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
def init_shelf():
global s
s = Shelf(-200)
vals = (4, 2, 8, 9, 1, 5, 10, 3, 7, 6)
for i in vals:
s.push(Block(i))
def disable_keys():
onkey(None, "s")
onkey(None, "i")
onkey(None, "q")
onkey(None, "r")
def enable_keys():
onkey(start_isort, "i")
onkey(start_ssort, "s")
onkey(start_qsort, "q")
onkey(randomize, "r")
onkey(bye, "space")
def main():
getscreen().clearscreen()
ht(); penup()
init_shelf()
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
listen()
return "EVENTLOOP"
instructions1 = "press i(insertion sort), s(selection sort), q (quicksort)"
instructions2 = "spacebar to quit, r randomize"
if __name__=="__main__":
msg = main()
mainloop()
|
[
"noreply@github.com"
] |
ankit-kumar-dwivedi.noreply@github.com
|
0fede212f4aa9e77c525520f56685faacd3c8017
|
b31497e3577a17d46e85b3a78f32dfe82b80638e
|
/SandBoxies/Sandbox4CorpusTables.py
|
80992b9863a254f77e3da8519c0c48d57b0f137e
|
[
"MIT"
] |
permissive
|
dpritsos/DoGSWrapper
|
ca954531ad241f2cfad442234e85dd929da15b8d
|
6f20a7dbcd61339f39bf0acf79c9fffbb1679ecc
|
refs/heads/master
| 2021-01-17T15:04:09.950521
| 2020-05-11T10:18:48
| 2020-05-11T10:18:48
| 17,569,177
| 1
| 0
| null | 2018-10-20T13:08:32
| 2014-03-09T17:13:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,057
|
py
|
import numpy as np #numpy.linalg for SVD etc.
#import numpy.linalg as alg
import scipy.spatial.distance as spd
import sklearn.decomposition as skd #sklearn.decomposition
import tables as tb
import time
corpus_mtrx_fname = '/home/dimitrios/Synergy-Crawler/Santinis_7-web_genre/Kfolds_Vocs_Inds_4Chars_7Genres/kfold_CorpusMatrix_9100000.h5'
#Loading a table from any corpus available at a time.
h5f = tb.open_file(corpus_mtrx_fname, 'r')
corpus_mtrx = h5f.get_node('/', 'corpus_earray')
#Testing Mahalanobis distance using randomly selected dimensions and timing it.
#Using projections of a random vector onto the new PC coordinate system.
def RandomMahal():
print "COVARIANCE Manualy Made:"
ts = time.time()
#Calculate Variables Means
mean_vect = np.mean(corpus_mtrx[0:100, 0:50000], axis=0)
#Convert Raw data vectors to Zero-mean Vectors
data_zero_mean = corpus_mtrx[0:100, 0:50000] - mean_vect
    #Display the Manually Created Covariance Matrix:
print np.matrix(data_zero_mean).T.shape
cov1 = np.ascontiguousarray( np.matrix(data_zero_mean).T ) * np.matrix(data_zero_mean) / (np.matrix(data_zero_mean).shape[0] - 1)
print cov1
print np.linalg.eig(cov1)
ts -= time.time()
print ts
print
print "COVARIANCE"
#ts = time.time()
#print np.cov(corpus_mtrx[0:100, 0:50000].T)
#ts -= time.time()
#print ts
print
print "COVARIANCE from SVD"
ts = time.time()
U, S, V = np.linalg.svd(data_zero_mean, full_matrices = False)
#print np.diag(S)
print U.shape, np.matrix(np.diag(S)).shape, V.shape
#print np.matrix(U).T.shape
print len(S)
#print np.ascontiguousarray( np.matrix(V).T ) * ( np.ascontiguousarray( np.matrix(np.diag(S)).T ) * np.matrix(np.diag(S)) ) * np.ascontiguousarray( np.matrix(V) ) / (np.matrix(data_zero_mean).shape[0] - 1)
#print np.matrix(U).shape, (np.matrix(np.diag(S)) * np.matrix(np.diag(S)).T).shape, np.matrix(U).shape
# np.matrix(U) * (np.matrix(np.diag(S)) * np.matrix(np.diag(S)).T) * np.matrix(U).T #SAME AS# print np.matrix(data_zero_mean) * np.matrix(data_zero_mean).T
ts -= time.time()
print ts
RandomMahal()
#Testing Mahalanobis distance using PCA/SVD for selecting the Principal Components.
#Using projections of a random vector onto the new PC coordinate system.
def PCAMahal():
pca = skd.PCA()
print pca.fit(corpus_mtrx)
#PCAMahal()
#mean = [.1,.1,.1,.1,.1,.5,.1,.1,.1,.1,.1,.1,.6,.1,.1,.1,.1,.1,.1]
#cov = np.diag([.5,.7,.9,.2,.10,.5,.9,.8,.9,.1,.4,.1,.1,.6,.7,.3,.5,.8,.4])
#train_set = np.random.multivariate_normal(mean,cov,10)
#estim = np.mean(train_set, axis=0)
#print np.cov(train_set.T)
#print estim
#test_vetc = [.0,.0,.0,.0,.0,.0,.0,.0,.0,.0,.0,.0,.0,.0,.0,.0,.0,.0,.0]
#pca = PCA()
#pca.fit(train_set)
#incov = alg.inv( alg.pinv(train_set.T) )
#print cov.shape
#print np.cov(train_set).shape
#print train_set.shape
#print incov
#test_vetc = pca.transform(test_vetc)
#mean = pca.transform(mean)
#print spd.mahalanobis(test_vetc,mean,incov)
#print incov
#print alg.pinv( np.cov(d) )
#print alg.svd(d)
#print l
h5f.close()
|
[
"dpritsos@extremepro.gr"
] |
dpritsos@extremepro.gr
|
f7c77f7c376b88637a107ea65a74ac4fbd938c63
|
73e147e1d49656fafba5d4bf84df5ded2c4dca73
|
/team_9/cocos/samples/tetrico/gamectrl.py
|
7929fc2bfb930fb43082c83b6f1ecced1246f46f
|
[
"LGPL-2.1-only",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-SA-2.0",
"BSD-3-Clause",
"CC-BY-NC-ND-3.0"
] |
permissive
|
Donnyvdm/dojo19
|
2278747366c57bfc80eb9ee28ca617ec0a79bae3
|
3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400
|
refs/heads/master
| 2020-07-26T12:22:15.882800
| 2019-09-15T20:34:36
| 2019-09-15T20:34:36
| 208,642,183
| 1
| 0
|
BSD-3-Clause
| 2019-09-15T18:57:53
| 2019-09-15T18:57:52
| null |
UTF-8
|
Python
| false
| false
| 2,744
|
py
|
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
# stdlib
import copy
import random
# pyglet related
import pyglet
from pyglet.window import key
# cocos2d related
from cocos.layer import Layer
from cocos.scene import Scene
from cocos.euclid import Point2
# tetrico related
from constants import *
from status import status
__all__ = ['GameCtrl']
#
# Controller ( MVC )
#
class GameCtrl( Layer ):
is_event_handler = True #: enable pyglet's events
def __init__(self, model):
super(GameCtrl,self).__init__()
self.used_key = False
self.paused = True
self.model = model
self.elapsed = 0
def on_key_press(self, k, m ):
if self.paused:
return False
if self.used_key:
return False
if k in (key.LEFT, key.RIGHT, key.DOWN, key.UP, key.SPACE):
if k == key.LEFT:
self.model.block_left()
elif k == key.RIGHT:
self.model.block_right()
elif k == key.DOWN:
self.model.block_down()
elif k == key.UP:
self.model.block_rotate()
elif k == key.SPACE:
# let the player move the block after it was dropped
self.elapsed = 0
self.model.block_drop()
self.used_key = True
return True
return False
def on_text_motion(self, motion):
if self.paused:
return False
if self.used_key:
return False
if motion in (key.MOTION_DOWN, key.MOTION_RIGHT, key.MOTION_LEFT):
if motion == key.MOTION_DOWN:
self.model.block_down()
elif motion == key.MOTION_LEFT:
self.model.block_left()
elif motion == key.MOTION_RIGHT:
self.model.block_right()
self.used_key = True
return True
return False
def pause_controller( self ):
        '''removes the scheduled timer and stops handling the keys'''
self.paused = True
self.unschedule( self.step )
def resume_controller( self ):
'''schedules the timer and handles the keys'''
self.paused = False
self.schedule( self.step )
def step( self, dt ):
'''updates the engine'''
self.elapsed += dt
if self.elapsed > status.level.speed:
self.elapsed = 0
self.model.block_down( sound=False)
def draw( self ):
'''draw the map and the block'''
self.used_key = False
|
[
"a.campello@wellcome.ac.uk"
] |
a.campello@wellcome.ac.uk
|
139ba1991dd4c1556129b5267f93f090572a05eb
|
5ed7f9b222f0af3a5ad0b7ee60c8177d0c539a2b
|
/src/utils/utils_exception.py
|
199e4875f121a2e45724141e24725cf9faf0c5a2
|
[] |
no_license
|
langqy/AutoTestFramework
|
8a1da890ff5a71e5b397434bb6d654563ef71916
|
1ea8966de10fb7a3f153a2fc8256b42486d75f66
|
refs/heads/master
| 2021-01-12T12:01:58.860191
| 2016-09-23T09:41:09
| 2016-09-23T09:41:09
| 69,223,762
| 1
| 1
| null | 2016-09-26T07:18:54
| 2016-09-26T07:18:53
| null |
UTF-8
|
Python
| false
| false
| 2,076
|
py
|
# -*- coding: utf-8 -*-
from selenium.common.exceptions import WebDriverException
from ConfigParser import NoSectionError, NoOptionError
class Error(Exception):
"""Base package Exception."""
pass
class FileException(Error):
"""Base file exception.Thrown when a file is not available.
For example:
file not exists.
"""
pass
class ConfigFileException(FileException):
"""Thrown when config file not exists."""
pass
class ConfigError(Error):
"""Thrown when basic config error, such as no [path] section or no 'base' option."""
pass
class DataFileNotAvailableException(FileException):
"""Thrown when data file not available."""
pass
class SheetTypeError(Error):
"""Thrown when sheet type passed in not int or str."""
pass
class SheetError(Error):
"""Thrown when specified sheet not exists."""
pass
class DataError(Error):
"""Thrown when something wrong with the data."""
pass
class LogFileNotAvailableException(FileException):
"""Thrown when log file not available."""
pass
class LogError(Error):
"""Thrown when something wrong when logging."""
pass
class ReportFileNotAvailableException(FileException):
"""Thrown when report file not available."""
pass
class ReportError(Error):
"""Thrown when something wrong when generate the report file."""
pass
class DriverNotExistsException(WebDriverException):
"""Thrown when driver not exists."""
pass
class UnSupportBrowserTypeException(WebDriverException):
"""Thrown when the browser type not support."""
pass
class ParameterError(Error):
"""Thrown when pass wrong parameter to a method."""
pass
class UploadFileError(Error):
"""Thrown when upload files not available."""
pass
class UploadWindowNotOpenError(Error):
"""Thrown when upload window not open."""
pass
class UploadWindowOpenError(Error):
"""Thrown when open upload window error."""
pass
class UnSupportMethod(Error):
"""Thrown when http method not allowed."""
pass
|
[
"396214358@qq.com"
] |
396214358@qq.com
|
515eff533e37237d92ab920547b3690d551abaa6
|
0ef4371c87c2196d9c2d2706e51f4b452f6e9d19
|
/4_Curso/Proyecto_Sistemas_Informáticos/model_exam_2/venv/lib/python3.7/site-packages/pip/_internal/cache.py
|
8ebecbbd5a62f90273255ac776095519d5fbd40d
|
[
"Apache-2.0"
] |
permissive
|
AlejandroSantorum/Apuntes_Mat_IngInf
|
49c41002314216a994aa60db04062e34abc065eb
|
c047e41d086f3028ec78ac3a663b9848862e52df
|
refs/heads/master
| 2023-05-15T03:02:56.882342
| 2023-04-20T20:19:54
| 2023-04-20T20:19:54
| 212,392,195
| 29
| 10
|
Apache-2.0
| 2023-09-09T13:03:45
| 2019-10-02T16:44:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,371
|
py
|
"""Cache Management
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import errno
import hashlib
import logging
import os
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.models.link import Link
from pip._internal.utils.compat import expanduser
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.wheel import InvalidWheelFilename, Wheel
if MYPY_CHECK_RUNNING:
from typing import Optional, Set, List, Any
from pip._internal.index import FormatControl
from pip._internal.pep425tags import Pep425Tag
logger = logging.getLogger(__name__)
class Cache(object):
"""An abstract class - provides cache directories for data from links
:param cache_dir: The root of the cache.
:param format_control: An object of FormatControl class to limit
binaries being read from the cache.
:param allowed_formats: which formats of files the cache should store.
('binary' and 'source' are the only allowed values)
"""
def __init__(self, cache_dir, format_control, allowed_formats):
# type: (str, FormatControl, Set[str]) -> None
super(Cache, self).__init__()
self.cache_dir = expanduser(cache_dir) if cache_dir else None
self.format_control = format_control
self.allowed_formats = allowed_formats
_valid_formats = {"source", "binary"}
assert self.allowed_formats.union(_valid_formats) == _valid_formats
def _get_cache_path_parts(self, link):
# type: (Link) -> List[str]
"""Get parts of part that must be os.path.joined with cache_dir
"""
# We want to generate an url to use as our cache key, we don't want to
# just re-use the URL because it might have other items in the fragment
# and we don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and
# thus less secure). However the differences don't make a lot of
# difference for our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top
# level directories where we might run out of sub directories on some
# FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
return parts
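    # Sketch of the layout above (digest abbreviated and hypothetical): a key
    # url hashing to "82fa2d09..." would yield parts ["82", "fa", "2d", "09..."],
    # i.e. three 2-character directory levels followed by the remainder.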
def _get_candidates(self, link, package_name):
# type: (Link, Optional[str]) -> List[Any]
can_not_cache = (
not self.cache_dir or
not package_name or
not link
)
if can_not_cache:
return []
canonical_name = canonicalize_name(package_name)
formats = self.format_control.get_allowed_formats(
canonical_name
)
if not self.allowed_formats.intersection(formats):
return []
root = self.get_path_for_link(link)
try:
return os.listdir(root)
except OSError as err:
if err.errno in {errno.ENOENT, errno.ENOTDIR}:
return []
raise
def get_path_for_link(self, link):
# type: (Link) -> str
"""Return a directory to store cached items in for link.
"""
raise NotImplementedError()
def get(
self,
link, # type: Link
package_name, # type: Optional[str]
supported_tags, # type: List[Pep425Tag]
):
# type: (...) -> Link
"""Returns a link to a cached item if it exists, otherwise returns the
passed link.
"""
raise NotImplementedError()
def _link_for_candidate(self, link, candidate):
# type: (Link, str) -> Link
root = self.get_path_for_link(link)
path = os.path.join(root, candidate)
return Link(path_to_url(path))
def cleanup(self):
# type: () -> None
pass
class SimpleWheelCache(Cache):
"""A cache of wheels for future installs.
"""
def __init__(self, cache_dir, format_control):
# type: (str, FormatControl) -> None
super(SimpleWheelCache, self).__init__(
cache_dir, format_control, {"binary"}
)
def get_path_for_link(self, link):
# type: (Link) -> str
"""Return a directory to store cached wheels for link
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were
not unique. E.g. ./package might have dozens of installs done for it
and build a version of 0.0...and if we built and cached a wheel, we'd
end up using the same wheel even if the source has been edited.
:param link: The link of the sdist for which this will cache wheels.
"""
parts = self._get_cache_path_parts(link)
# Store wheels within the root cache_dir
return os.path.join(self.cache_dir, "wheels", *parts)
def get(
self,
link, # type: Link
package_name, # type: Optional[str]
supported_tags, # type: List[Pep425Tag]
):
# type: (...) -> Link
candidates = []
for wheel_name in self._get_candidates(link, package_name):
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if not wheel.supported(supported_tags):
# Built for a different python/arch/etc
continue
candidates.append(
(wheel.support_index_min(supported_tags), wheel_name)
)
if not candidates:
return link
return self._link_for_candidate(link, min(candidates)[1])
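    # Selection note for get() above: candidates are (support_index_min,
    # wheel_name) pairs, so min() picks the wheel whose best tag appears
    # earliest in supported_tags, i.e. the most-preferred compatible wheel.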
class EphemWheelCache(SimpleWheelCache):
"""A SimpleWheelCache that creates it's own temporary cache directory
"""
def __init__(self, format_control):
# type: (FormatControl) -> None
self._temp_dir = TempDirectory(kind="ephem-wheel-cache")
super(EphemWheelCache, self).__init__(
self._temp_dir.path, format_control
)
def cleanup(self):
# type: () -> None
self._temp_dir.cleanup()
class WheelCache(Cache):
"""Wraps EphemWheelCache and SimpleWheelCache into a single Cache
    This Cache allows for graceful degradation, using the ephem wheel cache
when a certain link is not found in the simple wheel cache first.
"""
def __init__(self, cache_dir, format_control):
# type: (str, FormatControl) -> None
super(WheelCache, self).__init__(
cache_dir, format_control, {'binary'}
)
self._wheel_cache = SimpleWheelCache(cache_dir, format_control)
self._ephem_cache = EphemWheelCache(format_control)
def get_path_for_link(self, link):
# type: (Link) -> str
return self._wheel_cache.get_path_for_link(link)
def get_ephem_path_for_link(self, link):
# type: (Link) -> str
return self._ephem_cache.get_path_for_link(link)
def get(
self,
link, # type: Link
package_name, # type: Optional[str]
supported_tags, # type: List[Pep425Tag]
):
# type: (...) -> Link
retval = self._wheel_cache.get(
link=link,
package_name=package_name,
supported_tags=supported_tags,
)
if retval is not link:
return retval
return self._ephem_cache.get(
link=link,
package_name=package_name,
supported_tags=supported_tags,
)
def cleanup(self):
# type: () -> None
self._wheel_cache.cleanup()
self._ephem_cache.cleanup()
|
[
"alejandro.santorum@gmail.com"
] |
alejandro.santorum@gmail.com
|
5e78c2d4c4a562ebf196ffaa7d086e43931cd05b
|
467a320ff8e80e92d9ad42d514d45858ab4bc8f5
|
/Archived_Versions/TankDuel.py
|
397e58d8080e3610f6b839db3d1d44df464f9ba9
|
[] |
no_license
|
theTrueEnder/TankDuel-for-World-of-Tanks-Blitz
|
c81cd8015e97095f6c873355726b6371269c8dfb
|
c9fe1f49949495a592bd09dce8dcd589430e7372
|
refs/heads/master
| 2022-12-26T13:46:46.080283
| 2020-09-19T20:33:15
| 2020-09-19T20:33:15
| 296,938,145
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,183
|
py
|
#Copyright 2020, Kai Delsing, all rights reserved
##TO-DO##
#classes for each tank (no [2] pointers, etc, tank2.hp)
#av vs max/min rolls
#shell velocity and distancwe
#Name, HP, Damage, Reload, Adrenaline
tank1 = ['Obj. 704', 1500, 640, 11.79, False]
tank2 = ['T-55A', 1650, 310, 6.32, False]
################################################################
time = [0.000, tank1[3], tank2[3]]
active = True
adr = [0, 0, 75, 75]
def tank1_fire():
tank2[1] -= tank1[2]
print(tank1[0], 'fires, dealing', tank1[2], 'damage.')
state()
time[1] = tank1[3]
def tank2_fire():
tank1[1] -= tank2[2]
print(tank2[0], 'fires, dealing', tank2[2], 'damage.')
state()
time[2] = tank2[3]
def state():
print('Time: {:0.2f}'.format(time[0]), 'seconds')
print(tank1[0], 'has', tank1[1], 'HP remaining.')
print(tank2[0], 'has', tank2[1], 'HP remaining.\n')
def check_sim():
if tank1[1] <= 0 or tank2[1] <=0:
return False
else:
return True
def run_sim():
if tank1[4]:
adr[0] = 1.2
else:
adr[0] = 1
if tank2[4]:
adr[1] = 1.2
else:
adr[1] = 1
print('Begin Simulation:', tank1[0], 'vs', tank2[0])
tank1_fire()
time[0] += .001
tank2_fire()
while(time[0] < 1000):
time[0] += .001
adr[2] -= 0.001
adr[3] -= 0.001
time[1], time[2] = time[1] - (.001 * adr[0]), time[2] - (.001 * adr[1])
if time[1] <= 0:
i = time[1]
tank1_fire()
time[1] -= i #eliminate error from excess speed
if not check_sim():
break;
if time[2] <= 0:
i = time[2]
tank2_fire()
time[2] -= i #eliminate error from excess speed
if not check_sim():
break;
print('Simulation Complete.')
state()
if time[1] <= .001 or time[2] <= .001:
print('Result: Tie')
print('Tanks destroyed each other within margin of error.')
elif tank1[1] > tank2[1]:
print('Result:', tank1[0], 'wins.')
elif tank1[1] < tank2[1]:
print('Result:', tank2[0], 'wins.')
else:
print('Error')
run_sim()
|
[
"Kai"
] |
Kai
|
80b74e908fcaf01359e389423255e9543b69d931
|
328db27bcee118ea3d841120de56c203fd6ab05f
|
/coc-stats-gen.py
|
da54a4aa79188130a3aff0abf818388ac616e776
|
[
"MIT"
] |
permissive
|
mikeshoe/coc-stats-gen
|
ec80a0fcdef27d0f8ddc57fc69b626d21b529217
|
d1f9322a74b73ec0917fb1d9202b84f58c3cfeab
|
refs/heads/master
| 2021-01-11T04:16:24.058262
| 2016-10-21T18:42:25
| 2016-10-21T18:42:25
| 71,191,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,264
|
py
|
#! /usr/local/bin/python
'''
Created on Oct 17, 2016
@author: mike
'''
import xlrd
import time
import datetime
if __name__ == '__main__':
MY_DIR = "/Users/mike/Documents/coc/"
XL_FILE = "CerebralChaosStats.xlsx"
XL_FILE_PATH = MY_DIR + XL_FILE
XL_PLAYER_WS = "Player Metrics"
XL_WAR_WS = "War Metrics"
workbook = xlrd.open_workbook(XL_FILE_PATH)
player_worksheet = workbook.sheet_by_name(XL_PLAYER_WS)
war_worksheet = workbook.sheet_by_name(XL_WAR_WS)
#print "Rank:", worksheet.cell(2, 0).value
#print "Level:", worksheet.cell(2, 1).value
#print "Player:", worksheet.cell(2, 2).value
#print "Defensibility:", worksheet.cell(2, 3).value
#print "Included:", worksheet.cell(2, 4).value
#print "Attack Utilization:", worksheet.cell(2, 5).value
#print "Star Differential:", worksheet.cell(2, 6).value
#print "Star Differential Average:", worksheet.cell(2, 7).value
#print "Total Destruction:", worksheet.cell(2, 8).value
html_str = ""
star_diff_color = ""
star_diff_avg_color = ""
for row_index in range (2,28):
html_str = html_str + "<tr>\n"
rank = player_worksheet.cell(row_index, 0).value
html_str = html_str + " <td style=\"text-align: center;\">" + str(rank) + "</td>\n"
level = player_worksheet.cell(row_index, 1).value
html_str = html_str + " <td style=\"text-align: center;\">" + str(int(level)) + "</td>\n"
player = player_worksheet.cell(row_index, 2).value
html_str = html_str + " <td style=\"text-align: center;\">" + str(player) + "</td>\n"
defense = player_worksheet.cell(row_index, 3).value
html_str = html_str + " <td style=\"text-align: center;\">" + str(round(defense,1)) + "</td>\n"
included = player_worksheet.cell(row_index, 4).value
html_str = html_str + " <td style=\"text-align: center;\">" + str(int(included)) + "</td>\n"
attack_util = player_worksheet.cell(row_index, 5).value
html_str = html_str + " <td style=\"text-align: center;\">" + str(round(attack_util,2)*100) + "%</td>\n"
star_diff = player_worksheet.cell(row_index, 6).value
if star_diff < 0:
star_diff_color = "style=\"text-align: center; color: red;\""
else:
star_diff_color = "style=\"text-align: center;\""
html_str = html_str + " <td " + star_diff_color +" >" + str(int(star_diff)) + "</td>\n"
star_diff_avg = player_worksheet.cell(row_index, 7).value
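        # NOTE: this cell reuses star_diff_color; star_diff_avg_color
        # (initialised above) is never assigned per row.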
html_str = html_str + " <td " + star_diff_color +" >" + str(round(star_diff_avg, 1)) + "</td>\n"
tot_dest = player_worksheet.cell(row_index, 8).value
html_str = html_str + " <td style=\"text-align: center;\">" + str(int(tot_dest)) + "</td>\n"
html_str = html_str + "</tr>\n"
html_str = html_str + "\n\n\n\n"
#print html_str
utc_ts = datetime.datetime.utcnow()
utc_string = utc_ts.strftime('%Y-%m-%d-%H%M%SZ')
# Open a file
filename = MY_DIR + "coc-stats-" + utc_string + ".txt"
print filename
fo = open(filename, "wb")
    fo.write(html_str)
    # Close opened file
fo.close()
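    # Sketch (not used above): the repeated cell markup could be produced by a
    # small helper like this; 'td' is a hypothetical name, not from the script.
    def td(value, style="text-align: center;"):
        return "  <td style=\"" + style + "\">" + str(value) + "</td>\n"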
|
[
"mike@shoesobjects.com"
] |
mike@shoesobjects.com
|
848c3cc97d6fef8cf42b5ffbd94a9cfaf4a08159
|
4ac8fc7f26ddfeb71078f7fd4cc2a35c601d2d4a
|
/core/models.py
|
01e19f5a51ee71626b1d50faec929ff7c141e442
|
[] |
no_license
|
gillie1022/question-box
|
7ea1d43f045e7abeb82ad83139d68efaa66fdc9f
|
12c737f3335a2ed090a1007b91c39c0c529a9b87
|
refs/heads/main
| 2022-12-02T05:55:59.598476
| 2020-06-25T18:14:32
| 2020-06-25T18:14:32
| 286,825,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,455
|
py
|
from django.db import models
from users.models import User
from django.db.models import Q
from django.contrib.postgres.search import SearchVector
class Question(models.Model):
user = models.ForeignKey(
to=User, on_delete=models.CASCADE, related_name="questions"
)
title = models.CharField(max_length=255)
body = models.CharField(max_length=500)
asked_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
starred_by = models.ManyToManyField(to=User, related_name="starred_questions")
def __str__(self):
return self.title
class Answer(models.Model):
author = models.ForeignKey(
to=User, on_delete=models.CASCADE, null=True, related_name="answers"
)
question = models.ForeignKey(
to=Question, on_delete=models.CASCADE, related_name="answers"
)
body = models.CharField(verbose_name="Response", max_length=500)
answered_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
marked_correct = models.BooleanField(default=False)
    def is_marked_correct(self):
        # marked_correct is a BooleanField, so return its value directly
        # (the original filtered on it as if it were a related queryset).
        return self.marked_correct
def __str__(self):
return self.body
def search_questions_for_user(user, search_term):
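    # NOTE: 'user' is currently unused; the search runs over all questions.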
questions = Question.objects.all()
return questions \
.annotate(search=SearchVector('title', 'body', 'answers__body')) \
.filter(search=search_term) \
.distinct('pk')
|
[
"dave.gillie@icloud.com"
] |
dave.gillie@icloud.com
|
5230f72826ff9866fd73f5f90d37d9be2cf58bc2
|
d4c82eb9ae3037cf8742c3fc8c31cf4a80f5d21c
|
/examples/Python/examples/Demo/scripts/update.py
|
5e44d3ab0f01709540f9165030f0f97f90ca127d
|
[] |
no_license
|
gzhu-team-509/programming-knowledge-base
|
68132b1a669f208dab94dcf2401ce39d89ebe53b
|
3f3d026927157b7fdf210da195cb912366975e75
|
refs/heads/master
| 2021-05-05T12:17:12.686569
| 2017-11-04T07:30:28
| 2017-11-04T07:30:28
| 104,754,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,749
|
py
|
#! /usr/bin/python2.6
# Update a bunch of files according to a script.
# The input file contains lines of the form <filename>:<lineno>:<text>,
# meaning that the given line of the given file is to be replaced
# by the given text. This is useful for performing global substitutions
# on grep output:
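# Example input line (illustrative only): "foo.py:12:replacement text for line 12"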
import os
import sys
import re
pat = '^([^: \t\n]+):([1-9][0-9]*):'
prog = re.compile(pat)
class FileObj:
def __init__(self, filename):
self.filename = filename
self.changed = 0
try:
self.lines = open(filename, 'r').readlines()
except IOError, msg:
print '*** Can\'t open "%s":' % filename, msg
self.lines = None
return
print 'diffing', self.filename
def finish(self):
if not self.changed:
print 'no changes to', self.filename
return
try:
os.rename(self.filename, self.filename + '~')
fp = open(self.filename, 'w')
except (os.error, IOError), msg:
print '*** Can\'t rewrite "%s":' % self.filename, msg
return
print 'writing', self.filename
for line in self.lines:
fp.write(line)
fp.close()
self.changed = 0
def process(self, lineno, rest):
if self.lines is None:
print '(not processed): %s:%s:%s' % (
self.filename, lineno, rest),
return
i = eval(lineno) - 1
if not 0 <= i < len(self.lines):
print '*** Line number out of range: %s:%s:%s' % (
self.filename, lineno, rest),
return
if self.lines[i] == rest:
print '(no change): %s:%s:%s' % (
self.filename, lineno, rest),
return
if not self.changed:
self.changed = 1
print '%sc%s' % (lineno, lineno)
print '<', self.lines[i],
print '---'
self.lines[i] = rest
print '>', self.lines[i],
def main():
if sys.argv[1:]:
try:
fp = open(sys.argv[1], 'r')
except IOError, msg:
print 'Can\'t open "%s":' % sys.argv[1], msg
sys.exit(1)
else:
fp = sys.stdin
curfile = None
while 1:
line = fp.readline()
if not line:
if curfile: curfile.finish()
break
        m = prog.match(line)
        if not m:
            print 'Funny line:', line,
            continue
        filename, lineno = m.group(1, 2)
        if not curfile or filename != curfile.filename:
            if curfile: curfile.finish()
            curfile = FileObj(filename)
        curfile.process(lineno, line[m.end():])
if __name__ == "__main__":
main()
|
[
"lightyears1998@hotmail.com"
] |
lightyears1998@hotmail.com
|
4b9dad88c993897ba4aadd6fd538b20926a43b97
|
a70325c82e2486d67624dea1e053924fb9a5af06
|
/PiBotXBeeExperiments/ZmqFastSubscriber.py
|
954f1fd788cf73bef4c27d4ac1c503ace09cd2fc
|
[
"MIT"
] |
permissive
|
RocketRedNeck/PythonPlayground
|
88ce1b46c0134ea0d4fac6e5840e1325c44d6d92
|
a6f002625c9326de6995bc52960f25f78e9b2843
|
refs/heads/master
| 2023-05-26T09:42:22.960096
| 2023-05-15T00:22:47
| 2023-05-15T00:22:47
| 63,455,232
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
# Standard
import time
# 3rd Party
import zmq
# Local
import DataModel
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('tcp://localhost:12345') # In the guide, the subscribers connect to well know addresses (in this we stay local)
socket.setsockopt_string(zmq.SUBSCRIBE, '') # If you don't actually subscribe, you don't get anything! In this case any topic.
socket.RCVTIMEO = 1000 # timeout in milliseconds so we can tell if there is anything happening while waiting for data
socket.set_hwm(1) # High Water Marks help define the backlog we can handle, in this case if we are behind, just let things drop
header = DataModel.Header()
wait_count = 0
last_header_count = 0
while True:
try:
pyobj = socket.recv_pyobj()
type_pyobj = type(pyobj)
if isinstance(pyobj,DataModel.Header):
print(f'@ {pyobj.time} = {pyobj.count}')
if last_header_count != 0 and pyobj.count > last_header_count + 1:
print(f'Lost {pyobj.count - last_header_count - 1} Messages !!!!!')
last_header_count = pyobj.count
else:
print(f'Unknown Type Received: {type_pyobj}')
wait_count = 0
except KeyboardInterrupt:
break
except zmq.error.Again:
wait_count += 1
print(f'Waiting {wait_count}...')
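# Sketch of a matching publisher for local testing (assumptions: DataModel.Header()
# takes no arguments, as used above, and exposes settable 'time'/'count' fields;
# this function is illustrative and is never called).
def _publisher_sketch():
    pub = context.socket(zmq.PUB)
    pub.bind('tcp://*:12345')
    n = 0
    while True:
        header = DataModel.Header()
        header.time = time.time()
        n += 1
        header.count = n
        pub.send_pyobj(header)
        time.sleep(0.01)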
|
[
"the.rocketredneck@gmail.com"
] |
the.rocketredneck@gmail.com
|
c3977d5efccd68fcd03236ed68cf1a4f3e3d36e6
|
2fc9c51dbf57f72d7499359795c3f6ab5aada522
|
/graph/boj2251.py
|
f9cb04da48dfa586ca3b0c7855631f2c6015c72f
|
[] |
no_license
|
chankoo/problem-solving
|
9e45bed20310e5847392c2e9e88c0b7d41b73de6
|
aae3b469b0644d39f774946798130566a310a20b
|
refs/heads/master
| 2020-04-27T11:51:11.214826
| 2019-10-12T10:26:22
| 2019-10-12T10:26:22
| 174,311,189
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,507
|
py
|
# https://www.acmicpc.net/problem/2251
# sol
# Since there are only three buckets and the number of reachable states is small,
# this is solved with DFS. Reachable bucket states are kept in a 'status' list,
# and visited states are checked by membership on the states themselves.
# Mutable types cannot be hashed as membership keys, so a state is converted to
# a tuple before being added to 'visited'.
def pour(_from, _to, status):
global V
if status[_from] > 0:
n_status = [*status]
if V[_to] - status[_to] == 0:
return status
else:
if V[_to] - status[_to] >= status[_from]:
n_status[_to] = status[_to] + status[_from]
n_status[_from] = 0
else:
n_status[_to] = V[_to]
n_status[_from] = n_status[_from] - (V[_to] - status[_to])
return n_status
return status
def dfs(status):
global visited, output
if status[0] == 0:
output.append(status[2])
for _from,_to in [(0,1),(0,2),(1,2),(1,0),(2,0),(2,1)]:
n_status = tuple(pour(_from, _to, status))
if n_status not in visited:
visited.add(n_status)
dfs(n_status)
if __name__ == "__main__":
V = tuple(map(int, input().split()))
visited = set()
output = []
status = [0,0,V[2]]
visited.add(tuple(status))
dfs(status)
for vol in sorted(output)[:-1]:
print(vol, end=' ')
print(sorted(output)[-1])
|
[
"ckbaek1125@gmail.com"
] |
ckbaek1125@gmail.com
|
67a462dc96b70a7f72c50eb90bed09125f1e8d8f
|
274bc4ef4ef5f18d296c94a562b491d2f85f7905
|
/day58.py
|
3dbfd1cff6ec5099f15546728fcda7efabb4778c
|
[] |
no_license
|
BanoutAlketbi/100DaysofCode
|
e7a20f4981af34be6b0690db75ec418e7ae3aedc
|
5aaf9f52e5f306030e623a8f3b5e60798d79f60c
|
refs/heads/master
| 2020-07-10T05:19:43.602770
| 2019-11-23T19:35:50
| 2019-11-23T19:35:50
| 204,176,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
import re
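# NOTE: reusing the name 'str' below shadows the built-in str type; a name like 'text' would be safer.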
str = "The weather is rainy today"
x = re.findall("in", str)
print(x)
y = re.findall("windy", str)
print(y)
if (y):
print("Yes, there is a match")
else:
print("no match")
x = re.search("\s", str)
print("the first white-space character is located in postion:", x.start())
y = re.findall("windy", str)
print(y)
x = re.split("\s", str)
print(x)
|
[
"banoot@Banoots-MBP-5.home"
] |
banoot@Banoots-MBP-5.home
|
4c6f9178b232902d6851301a1aefc4b240271b0b
|
b5c3c06245c91b732b6a1d58220c5946bd4a86a7
|
/test/char_ngrams_test.py
|
93ef739f96796eb2524525584797e381ef825e59
|
[
"MIT"
] |
permissive
|
ritlinguine/linguine-python
|
aaff5198e7d0820fa513c2bd70bd240eb6d77d5b
|
5dee96dea64523d673ae1efc88d50e9309f4dd5e
|
refs/heads/master
| 2021-08-29T08:49:18.041601
| 2021-08-16T01:55:05
| 2021-08-16T01:55:05
| 90,911,583
| 1
| 1
| null | 2017-05-10T21:53:50
| 2017-05-10T21:53:50
| null |
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
import unittest
from linguine.corpus import Corpus
from linguine.ops.char_ngrams import CharNgrams
class CharNgramsOpTest(unittest.TestCase):
def setUp(self):
self.op = CharNgrams()
def test_run(self):
self.test_data = [Corpus("0", "Test", "The quick brown fox jumped over the lazy dog.\n")]
results = self.op.run(self.test_data)
desired_results = [{"corpus_id": "0",
"unigrams": {"_": 8, "a": 1, "b": 1, "c": 1, "d": 2, "e": 4, "f": 1, "g": 1, "h": 2, "i": 1,
"j": 1, "k": 1, "l": 1, "m": 1, "n": 1, "o": 4, "p": 1, "q": 1, "r": 2, "t": 2,
"u": 2, "v": 1, "w": 1, "x": 1, "y": 1, "z": 1},
"bigrams": {"_b": 1, "_d": 1, "_f": 1, "_j": 1, "_l": 1, "_o": 1, "_q": 1, "_t": 1, "az": 1,
"br": 1, "ck": 1, "d_": 1, "do": 1, "e_": 2, "ed": 1, "er": 1, "fo": 1, "he": 2,
"ic": 1, "ju": 1, "k_": 1, "la": 1, "mp": 1, "n_": 1, "og": 1, "ov": 1, "ow": 1,
"ox": 1, "pe": 1, "qu": 1, "r_": 1, "ro": 1, "th": 2, "ui": 1, "um": 1, "ve": 1,
"wn": 1, "x_": 1, "y_": 1, "zy": 1},
"trigrams": {"_br": 1, "_do": 1, "_fo": 1, "_ju": 1, "_la": 1, "_ov": 1, "_qu": 1, "_th": 1,
"azy": 1, "bro": 1, "ck_": 1, "d_o": 1, "dog": 1, "e_l": 1, "e_q": 1, "ed_": 1,
"er_": 1, "fox": 1, "he_": 2, "ick": 1, "jum": 1, "k_b": 1, "laz": 1, "mpe": 1,
"n_f": 1, "ove": 1, "own": 1, "ox_": 1, "ped": 1, "qui": 1, "r_t": 1, "row": 1,
"the": 2, "uic": 1, "ump": 1, "ver": 1, "wn_": 1, "x_j": 1, "y_d": 1,
"zy_": 1}}]
self.assertEqual(results, desired_results)
if __name__ == '__main__':
unittest.main()
|
[
"aphedges1@gmail.com"
] |
aphedges1@gmail.com
|
1daf7407cdf724ed73ebdce48a67a949af8b1698
|
c969779819894ad624a53273115a1a482181cb67
|
/src/models/api/plan_entry.py
|
68be97173886460c815f14471d7fe2ea11b5f130
|
[] |
no_license
|
nareshbandi123/sqldm104
|
c328edb38d42c6f3ed72e056f1f91ac3c5b0edeb
|
3705fdc43f177ebf5d7a0db687d1cd1d5d45985f
|
refs/heads/master
| 2022-01-18T00:16:10.451744
| 2019-06-19T13:06:39
| 2019-06-19T13:06:39
| 192,322,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
from src.models import api
# Fields required for creating an entity have a value of True
# Fields that are optional or present only on a newly created
# entity are False.
PlanEntry = api.create_model_class(
"PlanEntry", {
"case_ids": False,
"suite_id": False,
"runs": False,
"description": False,
"name": False,
"include_all": False,
"config_ids": False,
"id": False,
"assignedto_id": False
}
)
|
[
"E004415@cigniti.com"
] |
E004415@cigniti.com
|
bbfea187f1197f779a6f46a13d3160c00942ada2
|
ac45b55915e634815922329195c203b1e810458c
|
/minionOC169_6.py
|
659b01807dece629aec12ea54c7963aecb6ffaca
|
[] |
no_license
|
mj1e16lsst/iridisPeriodicNew
|
96a8bfef0d09f13e18adb81b89e25ae885e30bd9
|
dc0214b1e702b454e0cca67d4208b2113e1fbcea
|
refs/heads/master
| 2020-03-23T15:01:23.583944
| 2018-07-23T18:58:59
| 2018-07-23T18:58:59
| 141,715,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,177
|
py
|
from operator import add
#from astropy import units as u
#from astropy.coordinates import SkyCoord
#from astropy.stats import LombScargle
#from gatspy.periodic import LombScargleFast
from functools import partial
#from gatspy import periodic
#import matplotlib.pyplot as plt
#from matplotlib.font_manager import FontProperties
import lomb_scargle_multiband as periodic
from multiprocessing import Pool
import numpy as np
import os
#from sqlite3 import *
import random
from random import shuffle
from random import randint
import Observations
import Magnitudes
# In[13]:
#conn = connect('minion_1016_sqlite.db')
#conn = connect('astro_lsst_01_1004_sqlite.db')
#conn = connect('minion_1020_sqlite.db')
# In[14]:
# LSST zero points u,g,r,i,z,y
zeroPoints = [0,26.5,28.3,28.13,27.79,27.4,26.58]
FWHMeff = [0.8,0.92,0.87,0.83,0.80,0.78,0.76] # arcmins?
pixelScale = 0.2
readOut = 12.7
sigSys = 0.005
flareperiod = 4096
flarecycles = 10
dayinsec=86400
background = 40
# sat mag u,g,r,i,z,y=14.7,15.7,15.8,15.8,15.3 and 13.9
# start date 59580.033829 end date + 10 years
#maglist=[20]*7
lim = [0, 23.5, 24.8, 24.4, 23.9, 23.3, 22.1] # limiting magnitude u,g,r,i,z,y
sat = [0, 14.7, 15.7, 15.8, 15.8, 15.3, 13.9] # sat mag as above
# In[15]:
looooops = 10000
maglength = 20
freqlength = 20
processors = 20
startnumber = 0 + 6
endnumber = startnumber + 1
#observingStrategy = 'minion'
observingStrategy = 'astroD'
#observingStrategy = 'panstars'
inFile = '/home/mj1e16/periodic/in'+str(startnumber)+'.txt'
outFile = '/home/mj1e16/periodic/outminionOC169'+str(startnumber)+'.txt'
#inFile = '/home/ubuntu/vagrant/'+observingStrategy+'/in'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
#outFile = '/home/ubuntu/vagrant/'+observingStrategy+'/out'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
obs = Observations.obsminionOC169
for y in range(len(obs)):
for x in range(len(obs[y])):
obs[y][x] = obs[y][x] + ((random.random()*2.)-1.)
# In[19]:
def magUncertainy(Filter, objectmag, exposuretime,background, FWHM): # b is background counts per pixel
countsPS = 10**((Filter-objectmag)/2.5)
counts = countsPS * exposuretime
uncertainty = 1/(counts/((counts/2.3)+(((background/2.3)+(12.7**2))*2.266*((FWHM/0.2)**2)))**0.5) # gain assumed to be 1
return uncertainty
#from lsst should have got the website! https://smtn-002.lsst.io/
# In[20]:
def averageFlux(observations, Frequency, exptime):
b = [0]*len(observations)
for seconds in range(0, exptime):
a = [np.sin((2*np.pi*(Frequency))*(x+(seconds/(3600*24)))) for x in observations] # optical modulation
b = map(add, a, b)
c = [z/exptime for z in b]
return c
def Flux(observations,Frequency,exptime):
a = [np.sin((2*np.pi*(Frequency)*x)) for x in observations]
return a
# In[21]:
def ellipsoidalFlux(observations, Frequency,exptime):
period = 1/(Frequency)
phase = [(x % (2*period)) for x in observations]
b = [0]*len(observations)
for seconds in range(0, exptime):
a = [np.sin((2*np.pi*(Frequency))*(x+(seconds/(3600*24)))) for x in observations] # optical modulation
b = map(add, a, b)
c = [z/exptime for z in b]
for x in range(0,len(phase)):
if (phase[x]+(1.5*period)) < (3*period):
c[x] = c[x]*(1./3.)
else:
c[x] = c[x]*(2./3.)
return c
## this is doing something but not the right something, come back to it
# In[22]:
def flaring(B, length, dayinsec=86400,amplitude=1):
global flareMag, minutes
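    # Build a stochastic flare light curve: scale Gaussian draws by a power-law
    # spectrum, inverse-FFT to the time domain, then shift and normalise the
    # result to the requested amplitude.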
    fouriers = np.linspace(0.00001,0.05,int(dayinsec/30)) # np.linspace needs an integer sample count
logF = [np.log(x) for x in fouriers] # start at 30 go to a day in 30 sec increments
real = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers] #random.gauss(mu,sigma) to change for values from zurita
# imaginary = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers]
IFT = np.fft.ifft(real)
    seconds = np.linspace(0,dayinsec, int(dayinsec/30)) # the day in 30 sec increments
minutes = [x for x in seconds]
minimum = (np.max(-IFT))
    positive = [x + minimum for x in IFT] # shift so all values are non-negative, needed for normalisation
normalised = [x/(np.mean(positive)) for x in positive] # find normalisation
normalisedmin = minimum/(np.mean(positive))
normalised = [x - normalisedmin for x in normalised]
flareMag = [amplitude * x for x in normalised] # normalise to amplitude
logmins = [np.log(d) for d in minutes] # for plotting?
# plt.plot(minutes,flareMag)
# plt.title('lightcurve')
# plt.show()
return flareMag
# In[55]:
def lombScargle(frequencyRange,objectmag=20,loopNo=looooops,df=0.001,fmin=0.001,numsteps=100000,modulationAmplitude=0.1,Nquist=200): # frequency range and object mag in list
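    # Simulate multiband photometry of an ellipsoidal variable (with flares and
    # per-filter noise), fit a multiband Lomb-Scargle model, then estimate the
    # detection significance by re-fitting with randomly shuffled dates.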
#global totperiod, totmperiod, totpower, date, amplitude, frequency, periods, LSperiod, power, mag, error, SigLevel
results = {}
totperiod = []
totmperiod = []
totpower = [] # reset
SigLevel = []
filterletter = ['o','u','g','r','i','z','y']
period = 1/(frequencyRange)
if period > 0.5:
numsteps = 10000
elif period > 0.01:
numsteps = 100000
else:
numsteps = 200000
    freqs = fmin + df * np.arange(numsteps) # manual frequency grid
allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy = [], [], [], [], [], [], [] #reset
measuredpower = [] # reset
y = [allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy] # for looping only
for z in range(1, len(y)):
#y[z] = averageFlux(obs[z], frequencyRange[frange], 30) # amplitde calculation for observations, anf frequency range
y[z] = ellipsoidalFlux(obs[z], frequencyRange,30)
y[z] = [modulationAmplitude * t for t in y[z]] # scaling
for G in range(0, len(y[z])):
flareMinute = int(round((obs[z][G]*24*60*2)%((dayinsec/(30*2))*flarecycles)))
            y[z][G] = y[z][G] + longflare[flareMinute] # add flares (units swapped to seconds; keeping the old name introduces fewer bugs)
date = []
amplitude = []
mag = []
error = []
filts = []
for z in range(1, len(y)):
if objectmag[z] > sat[z] and objectmag[z] < lim[z]:
#date.extend([x for x in obs[z]])
date.extend(obs[z])
amplitude = [t + random.gauss(0,magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])) for t in y[z]] # scale amplitude and add poisson noise
mag.extend([objectmag[z] - t for t in amplitude]) # add actual mag
error.extend([sigSys + magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])+0.2]*len(amplitude))
filts.extend([filterletter[z]]*len(amplitude))
phase = [(day % (period*2))/(period*2) for day in obs[z]]
pmag = [objectmag[z] - t for t in amplitude]
# plt.plot(phase, pmag, 'o', markersize=4)
# plt.xlabel('Phase')
# plt.ylabel('Magnitude')
# plt.gca().invert_yaxis()
# plt.title('filter'+str(z)+', Period = '+str(period))#+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
# plt.show()
# plt.plot(date, mag, 'o')
# plt.xlim(lower,higher)
# plt.xlabel('time (days)')
# plt.ylabel('mag')
# plt.gca().invert_yaxis()
# plt.show()
model = periodic.LombScargleMultibandFast(fit_period=False)
model.fit(date, mag, error, filts)
power = model.score_frequency_grid(fmin, df, numsteps)
if period > 10.:
model.optimizer.period_range=(10, 110)
elif period > 0.51:
model.optimizer.period_range=(0.5, 10)
elif period > 0.011:
model.optimizer.period_range=(0.01, 0.52)
else:
model.optimizer.period_range=(0.0029, 0.012)
LSperiod = model.best_period
if period < 10:
higher = 10
else:
higher = 100
# fig, ax = plt.subplots()
# ax.plot(1./freqs, power)
# ax.set(xlim=(0, higher), ylim=(0, 1.2),
# xlabel='period (days)',
# ylabel='Lomb-Scargle Power',
# title='Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)));
# plt.show()
phase = [(day % (period*2))/(period*2) for day in date]
#idealphase = [(day % (period*2))/(period*2) for day in dayZ]
#print(len(phase),len(idealphase))
#plt.plot(idealphase,Zmag,'ko',)
# plt.plot(phase, mag, 'o', markersize=4)
# plt.xlabel('Phase')
# plt.ylabel('Magnitude')
# plt.gca().invert_yaxis()
# plt.title('Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
# plt.show()
#print(period, LSperiod, period*20)
# print('actualperiod', period, 'measured period', np.mean(LSperiod),power.max())# 'power',np.mean(power[maxpos]))
# print(frequencyRange[frange], 'z', z)
# totperiod.append(period)
# totmperiod.append(np.mean(LSperiod))
# totpower.append(power.max())
mpower = power.max()
measuredpower.append(power.max()) # should this correspond to period power and not max power?
maxpower = []
counter = 0.
for loop in range(0,loopNo):
random.shuffle(date)
model = periodic.LombScargleMultibandFast(fit_period=False)
model.fit(date, mag, error, filts)
power = model.score_frequency_grid(fmin, df, numsteps)
maxpower.append(power.max())
for X in range(0, len(maxpower)):
if maxpower[X] > measuredpower[-1]:
counter = counter + 1.
Significance = (1.-(counter/len(maxpower)))
#print('sig', Significance, 'counter', counter)
SigLevel.append(Significance)
#freqnumber = FrangeLoop.index(frequencyRange)
#magnumber = MagRange.index(objectmag)
#print(fullmaglist)
#listnumber = (magnumber*maglength)+freqnumber
# print(listnumber)
# measuredperiodlist[listnumber] = LSperiod
# periodlist[listnumber] = period
# powerlist[listnumber] = mpower
# siglist[listnumber] = Significance
# fullmaglist[listnumber] = objectmag
# results order, 0=mag,1=period,2=measuredperiod,3=siglevel,4=power,5=listnumber
results[0] = objectmag[3]
results[1] = period
results[2] = LSperiod
results[3] = Significance
results[4] = mpower
results[5] = 0#listnumber
return results
# In[24]:
#findObservations([(630,)])
#remove25(obs)
#averageFlux(obs[0], 1, 30)
longflare = []
for floop in range(0,flarecycles):
flareone = flaring(-1, flareperiod, amplitude=0.3)
flareone = flareone[0:1440]
positiveflare = [abs(x) for x in flareone]
longflare.extend(positiveflare)
# In[25]:
PrangeLoop = np.logspace(-2.5,2,freqlength)
FrangeLoop = [(1/x) for x in PrangeLoop]
# In[26]:
# reset results file
with open(inFile,'w') as f:
f.write('fullmaglist \n\n periodlist \n\n measuredperiodlist \n\n siglist \n\n powerlist \n\n listnumberlist \n\n end of file')
# In[57]:
results = []
fullmeasuredPeriod = []
fullPeriod = []
fullPower = []
fullSigLevel = []
fullMag = []
MagRangearray = np.linspace(17,24,maglength)
MagRange = [x for x in MagRangearray]
maglist = []
for x in range(len(MagRange)):
maglist.append([MagRange[x]]*7)
newlist = Magnitudes.mag169
pool = Pool(processors)
for h in range(startnumber,endnumber):
print(newlist[h])
results.append(pool.map(partial(lombScargle, objectmag=newlist[h]),FrangeLoop))
twoDlist = [[],[],[],[],[],[]]
for X in range(len(results)):
for Y in range(len(results[X])):
twoDlist[0].append(results[X][Y][0])
twoDlist[1].append(results[X][Y][1])
twoDlist[2].append(results[X][Y][2])
twoDlist[3].append(results[X][Y][3])
twoDlist[4].append(results[X][Y][4])
twoDlist[5].append(results[X][Y][5])
with open(inFile, 'r') as istr:
with open(outFile,'w') as ostr:
for i, line in enumerate(istr):
# Get rid of the trailing newline (if any).
line = line.rstrip('\n')
if i % 2 != 0:
line += str(twoDlist[int((i-1)/2)])+','
ostr.write(line+'\n')
|
[
"mj1e16@soton.ac.uk"
] |
mj1e16@soton.ac.uk
|
75e97332a66f40ccf2aeb33930b33748a7aab86e
|
23ce866f30ac93e1e47f31a50f94caf6b4cefd5b
|
/roles_exporter.py
|
0fedb68bc8e0199c6c75d64fd4368ef12f03ef8a
|
[] |
no_license
|
nsv777/ranger_automate
|
ca148ad566541c18144d3411d382e5fda288c209
|
cd25888fbf5b146b88eb6f82b28ae1aab8382dde
|
refs/heads/main
| 2023-03-18T19:21:31.839480
| 2021-01-14T19:34:23
| 2021-01-14T19:34:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,504
|
py
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This script will import Ranger roles from one cluster to another #
# It requires python 2.7 for now, #
# Created by Ajinkya Patil #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import json
from requests import get, post
import requests
import time
from getpass import getpass
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
IMPORT_RANGER_URL = raw_input("SOURCE_RANGER URL:- ")
ROLES_API = "/service/roles/roles/"
IMPORT_RANGER_ADMIN_USER = raw_input("SOURCE_RANGER ADMIN USER:- ")
IMPORT_RANGER_ADMIN_PASSWORD = getpass(prompt='SOURCE_RANGER ADMIN PASSWORD:- ', stream=None)
headers = {'Accept' : 'application/json'}
# Importing roles with all the configured users and groups
response = get(IMPORT_RANGER_URL + ROLES_API, headers=headers, verify=False,
auth=(IMPORT_RANGER_ADMIN_USER, IMPORT_RANGER_ADMIN_PASSWORD))
roles_convert = json.loads(response.content)
ROLES = roles_convert['roles']
TOTAL_ROLES = len(ROLES)
print "Total number of roles " + str(TOTAL_ROLES) + " will be exported."
EXPORT_RANGER_URL = raw_input("DEST_RANGER URL:- ")
EXPORT_RANGER_ADMIN_USER = raw_input("DEST_RANGER ADMIN USER:- ")
EXPORT_RANGER_ADMIN_PASSWORD = getpass(prompt='DEST_RANGER ADMIN PASSWORD:- ', stream=None)
headers = {'Accept' : 'application/json'}
# Exporting roles with all the configured users and groups
FAILED_ROLES = []
IMPORTED_ROLES = []
for ROLE in ROLES:
time.sleep(5)
del ROLE['id']
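    # drop the source cluster's role id so the destination Ranger assigns a new one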
response = post(EXPORT_RANGER_URL + ROLES_API, headers=headers, json=ROLE, verify=False,
auth=(EXPORT_RANGER_ADMIN_USER, EXPORT_RANGER_ADMIN_PASSWORD))
STATUS = response.status_code
ROLENAME = ROLE['name']
    if STATUS == 200:
        print "Successfully imported role " + ROLENAME + " with status " + str(STATUS)
        IMPORTED_ROLES.append(ROLENAME)
else:
print "Import for role " + ROLENAME + " failed with status " + str(STATUS)
FAILED_ROLES.append(ROLENAME)
print "\n" + str(len(FAILED_ROLES)) + " Roles have failed to export" + \
"\n" + str(len(IMPORTED_ROLES)) + " Roles have been imported successfully"
print "\nCould not import Following roles:- "
for FAILED_ROLE in FAILED_ROLES:
print FAILED_ROLE
|
[
"noreply@github.com"
] |
nsv777.noreply@github.com
|
d4df30377b84c2a7d24afe5d182fc910ec733e56
|
4677a6e500c9dba3172492f58b0efac715b7f28e
|
/ironman_py_scripts15.py
|
54b84ff8a69c59b5c5de0593e091022f5005ce14
|
[] |
no_license
|
ironmanscripts/py_scripts
|
e54e8b0f809ba27d490e5c041423263fdedcb41a
|
cf6d9fc43545c9d6d7748297d06178850411d5b4
|
refs/heads/master
| 2021-07-01T16:27:18.499229
| 2017-09-18T03:04:36
| 2017-09-18T03:04:36
| 103,217,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
#!/usr/bin/python
#python escape sequences
print "BackSlash \\\""
print "SingleQuote \'"
print 'DoubleQuote \"'
print "\a"
print "\a"
print "\\\b"
print "\f"
print "\n new line"
|
[
"jnaguboina@gmail.com"
] |
jnaguboina@gmail.com
|
c7a08290ba0875283258f8e558b06287f0e4869e
|
2ecbe098ce01b889c1373523a5f07e117370206a
|
/backend/home/management/commands/load_initial_data.py
|
332d9ce61f3ee997f28e5036a456e35c25a431b7
|
[] |
no_license
|
crowdbotics-apps/jooo-4480
|
25ac69547a3d40bafccfcb1fdc6c4c7603671273
|
d51a7e6764e7f582da65954ce4297cab547e8f35
|
refs/heads/master
| 2022-12-13T18:01:32.627608
| 2019-06-10T11:43:37
| 2019-06-10T11:43:37
| 191,157,420
| 0
| 0
| null | 2022-12-09T05:45:38
| 2019-06-10T11:43:18
|
Python
|
UTF-8
|
Python
| false
| false
| 711
|
py
|
from django.core.management import BaseCommand
from home.models import CustomText, HomePage
def load_initial_data():
homepage_body = """
<h1 class="display-4 text-center">jooo</h1>
<p class="lead">
This is the sample application created and deployed from the crowdbotics slack app. You can
view list of packages selected for this application below
</p>"""
customtext_title = 'jooo'
CustomText.objects.create(title=customtext_title)
HomePage.objects.create(body=homepage_body)
class Command(BaseCommand):
can_import_settings = True
help = 'Load initial data to db'
def handle(self, *args, **options):
load_initial_data()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
75db8b2e2ae8a679cfb3ef10c07e051f0c6c30c9
|
3440109c1b29c780a0a6092dace02f91b358382b
|
/list.py
|
2900fdb6b7539712a3bfe34f3d0aba203ee7f5ad
|
[] |
no_license
|
victor-py/victor
|
bacb3baf3178fc5f51cbe4cbb538e10970c39627
|
a9d09624e1845b5ab64d99e24d20f4b7a643725d
|
refs/heads/master
| 2022-04-25T04:29:47.610032
| 2020-04-19T10:10:56
| 2020-04-19T10:10:56
| 256,967,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
lists = []
print(lists)
lists.append(10)
lists.append(20)
print(lists)
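# tuple() over a string yields a tuple of its single characters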
tuples = tuple("hello. world!")
print(tuples)
|
[
"noreply@github.com"
] |
victor-py.noreply@github.com
|
450c22b5982c4514c0b654e41f040d0381883252
|
3bbc83ccd311b690c2fff6b1a1dbd38f4570ee4f
|
/setup.py
|
2f7884315b4044deae8123710eb2eddb34e38b14
|
[] |
no_license
|
hsharma35/bitfusion
|
25efbb4d52fa216429daaf2a8044c7e9f65ed61a
|
04ec1c87c91ef4a681858f84f5a7832d5b65e83b
|
refs/heads/master
| 2021-07-09T00:43:09.413463
| 2020-08-06T02:49:34
| 2020-08-06T02:49:34
| 153,977,735
| 74
| 18
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
from distutils.core import setup
setup(name='bitfusion', version='1.0.0', packages=['bitfusion'])
|
[
"hsharma@gatech.edu"
] |
hsharma@gatech.edu
|
d63b2bd9d11950fc74f7c66616a0174c6b2b8c7b
|
979358a415b173657552890750c867fb75b69cb0
|
/papeletas.py
|
f026147bb80547d9c23ded9eafeafda29182527e
|
[] |
no_license
|
artus-lla/papeletas
|
1041ab6a9ad855c1fa196d5561bfecbf98f57d92
|
c69426d10e527e06127a6b6fe777c413aae1d9d3
|
refs/heads/master
| 2020-06-06T04:15:48.070179
| 2013-08-12T03:40:39
| 2013-08-12T03:40:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,718
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from gi.repository import Gtk
import sqlite3
import src.misutils as misutils
import src.bd as bd
#global autorizado_para
#global motivo
#autorizado_para = 'Salir'
class VentanaPrincipal:
""" Renderiza la ventana principal de la aplicación """
def __init__ (self):
""" Inicialización de la interface """
b = Gtk.Builder()
b.add_from_file("./ui/main.ui")
        # Get the window
self.window1 = b.get_object("window1")
self.notebook = b.get_object("notebook")
#self.window1.set_activate_default(True)
self.aboutdialog1 = b.get_object("aboutdialog1")
        # Get the toolbar buttons
self.btn_papeleta = b.get_object("btn_papeleta")
self.btn_reporte = b.get_object("btn_reporte")
self.btn_personal = b.get_object("btn_personal")
self.tbtn_salir = b.get_object("tbtn_salir")
        # Get the staff-entry widgets
self.ent_dni = b.get_object("ent_dni")
self.ent_ape_nom = b.get_object("ent_ape_nom")
self.ent_profesion = b.get_object("ent_profesion")
self.ent_oficina = b.get_object("ent_oficina")
self.btn_guardar_personal = b.get_object("btn_guardar_personal")
self.vista_personal = b.get_object("vista_personal")
        # Get the slip-entry widgets
self.ent_ing_num_papel = b.get_object("ent_ing_num_papel")
self.ent_ing_fecha = b.get_object("ent_ing_fecha")
self.ent_ing_nombres = b.get_object("ent_ing_nombres")
self.radio_contituar = b.get_object("radio_contituar")
self.radio_ingresar = b.get_object("radio_ingresar")
self.radio_salir = b.get_object("radio_salir")
self.ent_ing_hsalida = b.get_object("ent_ing_hsalida")
self.ent_ing_hretorno = b.get_object("ent_ing_hretorno")
self.radio2_particulares = b.get_object("radio2_particulares")
self.radio2_enfermedad = b.get_object("radio2_enfermedad")
self.radio2_personales = b.get_object("radio2_personales")
self.radio2_comision = b.get_object("radio2_comision")
self.text_fundamento = b.get_object("text_fundamento")
self.btn_ing_guardar = b.get_object("btn_ing_guardar")
self.btn_ing_nuevo = b.get_object("btn_ing_nuevo")
self.acerca_de = b.get_object("acerca_de")
        # Reports
self.combo_anio = b.get_object("combo_anio")
self.combo_mes = b.get_object("combo_mes")
self.btn_report = b.get_object("btn_report")
        # Fill the combo boxes
misutils.llenar_combo_anio(self.combo_anio)
misutils.llenar_combo_mes(self.combo_mes)
        # Auto-connect the signals
b.connect_signals(self)
self.window1.show()
self.notebook.hide()
        # Destroy the window
self.window1.connect('destroy', lambda w: Gtk.main_quit())
def on_btn_papeleta_toggled(self, widget, data=None):
if widget.get_active():
self.notebook.set_current_page(0)
#self.btn_papeleta.set_active(True)
self.notebook.show()
self.btn_reporte.set_active(False)
self.btn_personal.set_active(False)
def on_btn_reporte_toggled(self, widget, data=None):
if widget.get_active():
self.notebook.set_current_page(1)
#self.btn_reporte.set_active(True)
self.notebook.show()
self.btn_papeleta.set_active(False)
self.btn_personal.set_active(False)
def on_btn_personal_toggled(self, widget, data=None):
if widget.get_active():
self.notebook.set_current_page(2)
self.notebook.show()
self.btn_papeleta.set_active(False)
self.btn_reporte.set_active(False)
self.ent_dni.grab_focus()
def on_tbtn_salir_clicked(self, widget, data=None):
print("Good bye")
Gtk.main_quit()
    # ========= Enter slip ===================
def on_acerca_de_clicked(self, widget, data=None):
self.aboutdialog1.run()
self.aboutdialog1.hide()
def on_ent_ing_num_papel_insert_text(self, widget, Text, position, data=None):
misutils.solo_numeros(self.ent_ing_num_papel, Text)
def on_ent_ing_fecha_insert_text(self, widget, Text, position, data=None):
misutils.fecha(self.ent_ing_fecha, Text)
def on_ent_ing_hsalida_insert_text(self, widget, Text, position, data=None):
misutils.hora(self.ent_ing_hsalida, Text)
def on_ent_ing_hretorno_insert_text(self, widget, Text, position, data=None):
misutils.hora(self.ent_ing_hretorno, Text)
def on_btn_ing_guardar_clicked(self, widget, data=None):
"""Grabar datos de papeleta"""
autorizado_para = ""
motivo = ""
        # * Get the data
num_papeleta = self.ent_ing_num_papel.get_text()
fecha = self.ent_ing_fecha.get_text()
nombres = self.ent_ing_nombres.get_text()
        # ** Determine the value of the autorizado_para variable
if self.radio_contituar.get_active() is True:
autorizado_para = "Continuar"
elif self.radio_ingresar.get_active() is True:
autorizado_para = "Ingresar"
else:
autorizado_para = "Salir"
#print(autorizado_para)
hora_salida = self.ent_ing_hsalida.get_text()
hora_retorno = self.ent_ing_hretorno.get_text()
        # ** Determine the value of the motivo variable
if self.radio2_enfermedad.get_active() is True:
motivo = "Enfermedad"
elif self.radio2_personales.get_active() is True:
motivo = "Personales"
elif self.radio2_comision.get_active() is True:
motivo = "Comisión de servicios"
else:
motivo = "Particulares"
#print(motivo)
fundamento = self.text_fundamento.get_text()
campos = ( num_papeleta, fecha, nombres,
autorizado_para, hora_salida, hora_retorno, motivo,
fundamento )
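        # Field order here must match the placeholder order in bd.insertar_papeleta.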
try:
fecha = misutils.arreglar_fecha(fecha)
bd_papeletas = sqlite3.connect("./data/papeletas.db")
cursor = bd_papeletas.cursor()
cursor.execute(bd.insertar_papeleta, campos)
bd_papeletas.commit()
bd_papeletas.close()
            # Clear the fields
self.ent_ing_num_papel.set_text("")
self.ent_ing_fecha.set_text("")
self.ent_ing_nombres.set_text("")
self.ent_ing_hsalida.set_text("")
self.ent_ing_hretorno.set_text("")
self.text_fundamento.set_text("")
self.ent_ing_num_papel.grab_focus()
except sqlite3.IntegrityError:
misutils.mensaje_dialogo("Debe llenar todos los campos")
except IndexError:
misutils.mensaje_dialogo("La fecha proporsionada no es correcta")
self.ent_ing_fecha.grab_focus()
def on_btn_report_clicked(self, widget, data=None):
"""Generación del reporte"""
anio = misutils.valor_combobox(self.combo_anio)
mes = misutils.valor_combobox(self.combo_mes)
        if anio is None:
misutils.mensaje_dialogo("Debe seleccionar un año")
#try:
bd_papeletas = sqlite3.connect("./data/papeletas.db")
cursor = bd_papeletas.cursor()
cursor.execute(bd.reporte)
reporte = cursor.fetchall()
print(reporte)
bd_papeletas.commit()
bd_papeletas.close()
#except:
    # ========== Staff entry ===================
def on_ent_dni_insert_text(self, widget, Text, position, data=None):
"""Permitir sólo números dni personal"""
misutils.solo_numeros(self.ent_dni, Text)
def on_btn_guardar_personal_clicked(self, widget, data=None):
"""Graba datos de personal en la tabla personal"""
# Obtener datos
dni = self.ent_dni.get_text()
nombre = self.ent_ape_nom.get_text()
profesion = self.ent_profesion.get_text()
oficina = self.ent_oficina.get_text()
        # Tuple with the fields
campos = (dni, nombre, profesion, oficina)
        falta_campo = False # whether any field is missing
for campo in campos:
if campo is None or campo == "": # comprobar que ningún campo esté vacío
falta_campo = True
if falta_campo is True:
misutils.mensaje_dialogo("Debe llenar todos los campos")
else:
try:
bd_papeletas = sqlite3.connect("./data/papeletas.db")
cursor = bd_papeletas.cursor()
cursor.execute(bd.insertar_datos, campos)
cursor.execute(bd.select_personal)
personal = cursor.fetchall()
bd_papeletas.commit()
bd_papeletas.close()
                # Clear the fields
self.ent_dni.set_text("")
self.ent_ape_nom.set_text("")
self.ent_profesion.set_text("")
self.ent_oficina.set_text("")
self.ent_dni.grab_focus()
                # Populate the vista_personal tree view
lista = Gtk.ListStore(str, str, str, str)
for tupla in personal:
lista.append([tupla[0], tupla[1], tupla[2], tupla[3]])
#print(tupla)
#lista.append(["Negro", 12])
render = Gtk.CellRendererText()
columna1 = Gtk.TreeViewColumn("DNI", render, text=0)
columna2 = Gtk.TreeViewColumn("Apellidos y Nombres",render,text=1)
columna3 = Gtk.TreeViewColumn("Profesión",render,text=2)
columna4 = Gtk.TreeViewColumn("Oficina o servicio",render,text=3)
self.vista_personal.set_model(lista)
self.vista_personal.append_column(columna1)
self.vista_personal.append_column(columna2)
self.vista_personal.append_column(columna3)
self.vista_personal.append_column(columna4)
self.vista_personal.show()
except sqlite3.IntegrityError:
misutils.mensaje_dialogo("El campo DNI debe tener 8 dígitos o el DNI ya existe")
self.ent_dni.grab_focus()
if __name__ == "__main__":
gui = VentanaPrincipal()
Gtk.main()
|
[
"artus.lla@autistici.org"
] |
artus.lla@autistici.org
|
df9761b1d49fd97933e30ada73416e0f3957e0ab
|
845bd3b74cb2bff98ab42a38f01a9bfcd700d1f8
|
/virtual/bin/wheel
|
29b6eaa62bd04f1968011ac216b3bee3330498d4
|
[] |
no_license
|
stacy867/instagram
|
e2a32faaaec128eee82bdf987f4bcbe3a23468bf
|
ed9f9215eb7dd4a2ca6eded2afcea31afb8bef1a
|
refs/heads/master
| 2020-08-17T22:46:50.297256
| 2019-10-22T12:30:07
| 2019-10-22T12:30:07
| 215,719,516
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
#!/home/wecode/insta_app/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"stacymurenzi@gmail.com"
] |
stacymurenzi@gmail.com
|
|
69c5ee9302b315934ab4ace9758bfd16d0677d35
|
cfcb36f7479cae08b09c20a7c5d8a67f9ba532b1
|
/hw2_template_dev.py
|
d61d06bdfb5262ce9eac9dddf00239fe0852079c
|
[] |
no_license
|
roseteague/Computational_Methods_HW2
|
4fd95989941487029964e43ae8253bf44565169a
|
e6bf765597a8e2342a29b1242bd8586f5b9117c9
|
refs/heads/master
| 2020-03-25T04:38:38.192282
| 2017-11-16T23:37:48
| 2017-11-16T23:37:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,989
|
py
|
"""
Rosemary Teague
00828351
Assumes cost.f90 has been compiled with f2py to generate the module,
hw2mod.so (filename may also be of form hw2mod.xxx.so where xxx is system-dependent text).
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors, ticker, cm
import scipy.optimize
from hw2mod import cost
from hw2mod import hw2
from time import time
from time import clock
import timeit
def visualize(Nx,Ny,xrange=[-10,10],yrange=[-10,10], Noise=1.0):
"""
===============================================================
Visualization of a 2D cost function, j, of the form:
j = j + (1 - x)^2 + 20(y - x^2)^2
===============================================================
Parameters
------------
Nx : Integer
Number of points along the x-direction to be plotted
Ny : Integer
Number of points along the y-direction to be plotted
xrange : list, optional
Range of x-points to be considered. Default from -10<x<10
yrange : list, optional
Range of y-points to be considered. Default from -10<y<10
Noise : float
Amplitude of Noise to be considered in second plot.
Returns
----------
N/A
Calling this function will save two figures to the users directory. A plot
titled hw211.png will display a contour plot of the cost function, on a
logarithmic scale in j, between the values specified in xrange and yrange.
A second plot titled hw212.png will display the same function over the same
range but with a random noise added, the amplitude of which can be set as
a parameter.
"""
#Create 2D array of points
[X,Y]=np.linspace(xrange[0],xrange[1],Nx),np.linspace(yrange[0],yrange[1],Ny)
#calculate noiseless cost function at each point on 2D grid
cost.c_noise=False
j=[[cost.costj([xi,yi]) for xi in X] for yi in Y]
#calculate noisey cost function at each point in 2D grid.
cost.c_noise = True
cost.c_noise_amp = Noise
jn=[[cost.costj([xi,yi]) for xi in X] for yi in Y]
#create contour plots of cost functions with and without noise
plt.figure()
fig, ax = plt.subplots()
cp = ax.contourf(X, Y, j, locator=ticker.LogLocator(), cmap=cm.GnBu)
cbar = fig.colorbar(cp)
plt.title('Rosemary Teague, Visualize \n 2D cost function, no noise.')
plt.savefig('hw211', dpi = 700)
plt.figure()
fig, ax = plt.subplots()
cpn = ax.contourf(X, Y, jn, locator=ticker.LogLocator(), cmap=cm.GnBu)
cbar = fig.colorbar(cpn)
plt.title('Rosemary Teague, Visualize \n 2D cost function, Noise amplitude='+str(cost.c_noise_amp))
plt.savefig('hw212', dpi = 700)
def newton_test(xg,display=False,i=1,timing=False):
"""
============================================================================
Use Newton's method to minimize a cost function, j, defined in cost module.
============================================================================
Parameters
----------
xg : list
Initial guess
display : Boolean, Optional
If set to True, figures will be created to illustrate the optimization
path taken and the distance from convergence at each step.
i=1 : Integer, Optional
Sets the name of the figures as hw22i.png
timing : Boolean, Optional
If set to true, an average time will be calculated for the completion
of finding a minimum and will be appended to the tuple output.
Returns
---------
xf : ndarray
Computed location of minimum
jf : float
Computed minimum
output : Tuple
        containing the time taken for the minima to be found (an average over
        10 runs); only set if the timing parameter is set to True, otherwise
empty.
Calling this function will produce a figure containing two subplots. The first
will illustrate the location of each step in the minimization path, overlayed
over the initial cost function. The second will illustrate the distance from
the final, computed minimum at each iteration.
"""
cost.c_noise=False
hw2.tol=10**(-6)
hw2.itermax=1000
t21=0
output=()
if timing:
N=10
else:
N=1
    for j in range(N):
t1=time()
hw2.newton(xg)
t2=time()
t21=t21+(t2-t1)
X,Y=hw2.xpath
xf=[X[-1],Y[-1]]
jpathn=[j for j in hw2.jpath]
jf=hw2.jpath[-1]
output=(t21/N, X, Y, jpathn)
if display:
Minx=min(X)-1
Maxx=max(X)+1
Miny=min(Y)-1
Maxy=max(Y)+1
[Xj,Yj]=np.linspace(Minx,Maxx,200),np.linspace(Miny,Maxy,200)
#calculate noiseless cost function at each point on 2D grid
j=[[cost.costj([xi,yi]) for xi in Xj] for yi in Yj]
f, (p1,p2) = plt.subplots(1,2)
p1.contourf(Xj, Yj, j, locator=ticker.LogLocator(), cmap=cm.GnBu)
p1.plot(X,Y,'g',marker='d')
p1.set_xlim(min(X)-1,max(X)+1)
p1.set_xlabel('X1-location')
p1.set_ylabel('X2-location')
p1.set_title('Convergence Path')
p2.plot(np.linspace(0,len(X)-1,len(X)),hw2.jpath-jf)
p2.set_xlabel('Iteration number')
p2.set_ylabel('distance from converged minimum')
p2.set_title('Rate')
plt.suptitle('Rosemary Teague, Newton_test, initial guess ='+str(xg)+' \n Convergence of a cost function')
plt.tight_layout(pad=4)
plt.savefig('hw22'+str(i), dpi=700)
return xf,jf,output
def bracket_descent_test(xg,display=False,compare=False,i=1):
"""
======================================================================================
Use the Bracket Descent method to minimize a cost function, j, defined in cost module.
======================================================================================
Parameters
----------
xg : list
Initial guess
display : Boolean, Optional
If set to True, figures will be created to illustrate the optimization
path taken and the distance from convergence at each step.
compare : Boolean, optional
If set to True, a figure will be created to directly compare Newton and
Bracket Descent methods.
i=1 : Integer, Optional
Sets the name of the figures as hw231(/2)_i.png.
Returns
---------
xf : ndarray
Computed location of minimum
jf : float
Computed minimum
output : Tuple
        containing the time taken for the minima to be found for each of the Newton
        and bracket descent methods (an average over 10 runs); only set if the
        compare parameter is set to True, otherwise empty.
Calling this function will produce two figures. The first will containing two
subplots illustrating the location of each step in the minimization path, overlayed
over the initial cost function, and the distance of j from the final, computed
minimum at each iteration.
The second plot (which is only produced when 'compare' is set to True) demonstrates
the distance of each step from the final, converged minimum at each iteration.
This shows that the newton method requires significantly fewer steps and is hence
faster.
Trends Observed
----------------
    Figures hw231_i show that the path taken during a bracket descent convergence is much
    longer than in a Newton convergence (shown in figures hw22i). This is because
    the B-D method limits the size of a step to 2*L, where L is defined by the size
    of an equilateral triangle whose centroid moves with each step. The method is
    furthermore designed such that this triangle can only decrease in size per
    iteration, and hence the maximum length a step can take can only be
    decreased (not increased) throughout the convergence. The figures further show
    that steps appear to be taken initially perpendicular to the curvature, finding
    the minimum along that strip, and then converging down the parallel path
    until they reach the tolerance level.
In contrast, the Newton approach is not limited in the size of the steps it is
able to take and can hence converge in a much smaller number of iterations.
This is a result of the use of gradients in this method. Figures hw22i illustrate
how each step travels through many bands on the contour plot (representing
differences of 1 order of magnitude each) as the method searches for the
direction of minimisation.
"""
cost.c_noise=False
hw2.tol=10**(-6)
hw2.itermax=1000
t34=0
output = ()
if compare:
N=10
else:
N=1
    for j in range(N):
t3=time()
hw2.bracket_descent(xg)
t4=time()
t34=t34+(t4-t3)
X,Y=hw2.xpath
xf=[X[-1],Y[-1]]
jf=hw2.jpath[-1]
d1=np.sqrt((X-xf[0])**2+(Y-xf[1])**2)
if display:
Minx=min(X)-1
Maxx=max(X)+1
Miny=min(Y)-1
Maxy=max(Y)+1
[Xj,Yj]=np.linspace(Minx,Maxx,200),np.linspace(Miny,Maxy,200)
#calculate noiseless cost function at each point on 2D grid
j=[[cost.costj([xi,yi]) for xi in Xj] for yi in Yj]
f, (p1,p2) = plt.subplots(1,2)
p1.contourf(Xj, Yj, j, locator=ticker.LogLocator(), cmap=cm.GnBu)
p1.plot(X,Y,'g',marker='d')
p1.set_xlabel('X1-location')
p1.set_ylabel('X2-location')
p1.set_title('Convergence Path')
p2.semilogy(np.linspace(1,len(X),len(X)),hw2.jpath)
p2.set_xlabel('Iteration number')
p2.set_ylabel('distance from converged minimum')
p2.set_title('Rate')
plt.suptitle('Rosemary Teague, bracket_descent_test, initial guess ='+str(xg)+' \n Rate of convergence of a cost function')
plt.tight_layout(pad=4)
plt.savefig('hw231_'+str(i), dpi=700)
if compare:
plt.close('all')
One,=plt.loglog(np.linspace(1,len(X),len(X)),hw2.jpath)
xf2,jf2,outputn=newton_test(xg,timing=True)
X2,Y2=outputn[1],outputn[2]
d2=np.sqrt((X2-xf2[0])**2+(Y2-xf2[1])**2)
print(np.linspace(1,len(X2),len(X2)),outputn[3])
Two,=plt.loglog(np.linspace(1,len(X2),len(X2)),outputn[3])
One.set_label('Bracket Descent')
Two.set_label('Newton')
plt.xlabel('Iteration number')
plt.ylabel('Distance from converged minimum')
plt.legend()
plt.title('Rosemary Teague, bracket_descent_test, initial guess ='+str(xg)+' \n Comparison of Newton and Bracket Descent Methods')
plt.savefig('hw232_'+str(i), dpi=700)
output=(outputn[0],t34/N)
return xf,jf,output
def performance(tol):
"""
============================================================================
Assesses the performance of Bracket Descent and Scipy L-BFGS-B Methods
============================================================================
Parameters
------------
tol : float
Determines the tolerance for minimization
Returns
------------
This function will produce 4 figures.
The first 3 will represent a comparison of the precison of each method while
the 4th will represent a comparison of the timing.
The first three show the location of the computed minima for initial guesses
of [-100,-3], [-50,-3], [-10,-3] and [-1,-3]. These are overlayed onto the
original cost function; the Scipy L-BFGS-B results are represented by red
diamonds while the Bracket Descent results are represented by blue diamonds.
The three figures represent the cases when the noise amplitude is set to 0,
1, and 10.
The final figure consists of four subplots, the upper row represents the
computational time taken for convergence, given an initial x starting point,
    while the lower represents the number of iterations required. In each case
    the Scipy L-BFGS-B method is shown on the left and the Bracket Descent is
    shown on the right. A legend on each plot differentiates the cases when the
    Noise Amplitude is set to 0, 1, and 10.
Trends Observed
----------------
For all cases, the Scipy minimization function appears to be more consistent
(to rely less on the initial guess) than the fortran Bracket Descent method.
This is seen in figures hw241-hw243, where the B-D results are seen to cover
    a broader spread of final coordinates. These figures also illustrate that as
the level of noise of the cost function is increased, the Scipy L-BFGS-B
method becomes increasingly favourable over the Bracket descent approach,
producing more precise results each time.
This is a result of the lack of consideration for noise within the Bracket
Descent method; that is to say that any random fluctations which result in
two neighbouring points (along the convergence path) lying within the
tolerance limit will be assumed to be the true minimum of the function as
defined by the B-D method. However, it is likely that the Scipy L-BFGS-B
method is adapted to smooth out noisy functions and hence find the true
minimum more reliably.
A consideration of figure hw244, however, demonstrates an advantage of the
    B-D method over the Scipy L-BFGS-B minimization in the form of timing. It can
be seen that despite requiring more iterations before converging to within a
set tolerance, the total computational time is less to within a factor of 10.
"""
plt.close('all')
count=0
hw2.tol=tol
    nintb=[]; nintl=[]; tlbfgsb=[]; txfbd=[]; lbfgsx=[]; lbfgsy=[]; xfbdx=[]; xfbdy=[]
cost.c_noise=True
for cost.c_noise_amp in [0., 1., 10.]:
count=count+1
for [x,y] in [[-100.,-3.],[-50.,-3.],[-10.,-3.],[-1.,-3.]]:
t12=0;t34=0
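            # time each optimiser over 1000 repeats and keep the average below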
for i in range(0,1000):
t1=time()
scipy.optimize.minimize(cost.costj, [x,y], method='L-BFGS-B' ,tol=tol)
t2=time()
t12=t12+(t2-t1)
t3=time()
hw2.bracket_descent([x,y])
t4=time()
t34=t34+(t4-t3)
tlbfgsb.append(t12/1000); txfbd.append(t34/1000)
info=scipy.optimize.minimize(cost.costj, [x,y], method='L-BFGS-B' ,tol=tol)
xfbd,jfbd,i2=hw2.bracket_descent([x,y])
# print('method: ', 'Fortran Bracket Descent')
# print('Value: ', jfbd)
# print('number of iterations:', i2)
# print('x: ', xfbd)
# print('c_noise: ', cost.c_noise)
# print(' ')
#print(info)
x=info.x
lbfgsx.append(x[0])
lbfgsy.append(x[1])
xfbdx.append(xfbd[0])
xfbdy.append(xfbd[1])
nint=info.nit
nintl.append(nint)
nintb.append(i2)
Minx=1+(min([min(xfbdx[(count-1)*4:count*4]),min(lbfgsx[(count-1)*4:count*4])])-1)*1.1
Maxx=1+(max([max(xfbdx[(count-1)*4:count*4]),max(lbfgsx[(count-1)*4:count*4])])-1)*1.1
Miny=1+(min([min(xfbdy[(count-1)*4:count*4]),min(lbfgsy[(count-1)*4:count*4])])-1)*1.1
Maxy=1+(max([max(xfbdy[(count-1)*4:count*4]),max(lbfgsy[(count-1)*4:count*4])])-1)*1.1
[X,Y]=np.linspace(Minx,Maxx,200),np.linspace(Miny,Maxy,200)
#calculate noiseless cost function at each point on 2D grid
j=[[cost.costj([xi,yi]) for xi in X] for yi in Y]
#create contour plots of cost functions with and without noise
fig, p4 = plt.subplots()
cp = p4.contourf(X, Y, j, locator=ticker.LogLocator(), cmap=cm.GnBu)
cbar = fig.colorbar(cp)
BD,=p4.plot(xfbdx[(count-1)*4:count*4],xfbdy[(count-1)*4:count*4],'b',linestyle='None',marker='d',markersize=6)
Scipy,=p4.plot(lbfgsx[(count-1)*4:count*4],lbfgsy[(count-1)*4:count*4],'r',linestyle='None',marker='d',markersize=6)
BD.set_label('Fortran Bracket Descent')
Scipy.set_label('Scipy optimize L-BFGS-B')
plt.legend(loc='upper left', fontsize='small')
plt.suptitle('Rosemary Teague, performance \n Comparison of converged values, Noise='+str(int(cost.c_noise_amp)))
#plt.tight_layout(pad=5)
plt.savefig('hw24'+str(count), dpi=700)
print(tlbfgsb)
plt.close('all')
f4, (p414,p424) = plt.subplots(2,2,sharey=True)
one,=p414[0].plot(tlbfgsb[:4],[np.abs(-100.),np.abs(-50.),np.abs(-10.),np.abs(-1.)],'r',marker='x',markersize=12)
two,=p414[0].plot(tlbfgsb[4:8],[np.abs(-100.),np.abs(-50.),np.abs(-10.),np.abs(-1.)],'m',marker='x',markersize=12)
three,=p414[0].plot(tlbfgsb[8:],[np.abs(-100.),np.abs(-50.),np.abs(-10.),np.abs(-1.)],'#c79fef',marker='x',markersize=12)
one.set_label('No Noise')
two.set_label('Noise = 1.0')
three.set_label('Noise = 10.0')
p414[0].set_title('Scipy Optimise L-BFGS-B')
p414[0].set_xlabel('Time Taken')
p414[0].legend( loc = 'upper right', fontsize = 'x-small')
p414[0].xaxis.set_ticks(np.linspace(min(tlbfgsb),max(tlbfgsb),3))
p414[0].ticklabel_format(useOffset=False)
uno,=p414[1].plot(txfbd[:4],[np.abs(-100.-xfbdx[0]),np.abs(-50.-xfbdx[1]),np.abs(-10.-xfbdx[2]),np.abs(-1.-xfbdx[3])],'b',marker='x',markersize=12)
dos,=p414[1].plot(txfbd[4:8],[np.abs(-100.-xfbdx[4]),np.abs(-50.-xfbdx[5]),np.abs(-10.-xfbdx[6]),np.abs(-1.-xfbdx[7])],'g',marker='x',markersize=12)
tres,=p414[1].plot(txfbd[8:],[np.abs(-100.-xfbdx[8]),np.abs(-50.-xfbdx[9]),np.abs(-10.-xfbdx[10]),np.abs(-1.-xfbdx[11])],'c',marker='x',markersize=12)
uno.set_label('No Noise')
dos.set_label('Noise = 1.0')
tres.set_label('Noise = 10.0')
p414[1].set_title('Fortran Bracket Descent')
p414[1].set_xlabel('Time Taken')
p414[1].legend(loc = 'upper left', fontsize = 'x-small')
p414[1].xaxis.set_ticks(np.linspace(min(txfbd),max(txfbd),3))
one1,=p424[0].plot(nintl[:4],[np.abs(-100.-lbfgsx[0]),np.abs(-50.-lbfgsx[1]),np.abs(-10.-lbfgsx[2]),np.abs(-1.-lbfgsx[3])],'r',marker='x',markersize=12)
two2,=p424[0].plot(nintl[4:8],[np.abs(-100.-lbfgsx[4]),np.abs(-50.-lbfgsx[5]),np.abs(-10.-lbfgsx[6]),np.abs(-1.-lbfgsx[7])],'m',marker='x',markersize=12)
three3,=p424[0].plot(nintl[8:],[np.abs(-100.-lbfgsx[8]),np.abs(-50.-lbfgsx[9]),np.abs(-10.-lbfgsx[10]),np.abs(-1.-lbfgsx[11])],'#c79fef',marker='x',markersize=12)
one1.set_label('No Noise')
two2.set_label('Noise = 1.0')
three3.set_label('Noise = 10.0')
p424[0].set_xlabel('Number of Iterations')
p424[0].legend( loc = 'upper left', fontsize = 'x-small')
p424[0].ticklabel_format(useOffset=False)
uno1,=p424[1].plot(nintb[:4],[np.abs(-100.-xfbdx[0]),np.abs(-50.-xfbdx[1]),np.abs(-10.-xfbdx[2]),np.abs(-1.-xfbdx[3])],'b',marker='x',markersize=12)
dos2,=p424[1].plot(nintb[4:8],[np.abs(-100.-xfbdx[4]),np.abs(-50.-xfbdx[5]),np.abs(-10.-xfbdx[6]),np.abs(-1.-xfbdx[7])],'g',marker='x',markersize=12)
tres3,=p424[1].plot(nintb[8:],[np.abs(-100.-xfbdx[8]),np.abs(-50.-xfbdx[9]),np.abs(-10.-xfbdx[10]),np.abs(-1.-xfbdx[11])],'c',marker='x',markersize=12)
uno1.set_label('No Noise')
dos2.set_label('Noise = 1.0')
tres3.set_label('Noise = 10.0')
p424[1].set_xlabel('Number of Iterations')
p424[1].legend(loc = 'upper left', fontsize = 'x-small')
    f4.text(0.04, 0.5, 'Initial x-distance from converged minimum', va='center', rotation='vertical')
plt.suptitle('Rosemary Teague, performance \n Time taken for values to converge',fontsize='large')
plt.tight_layout(pad=3.5, h_pad=1,w_pad=1)
plt.savefig('hw244', dpi=700)
if __name__ == '__main__':
#
# visualize(200,200)
#
# newton_test([10.,10.],display=True,i=1)
# newton_test([5.,5.],display=True,i=2)
# newton_test([2.,2.],display=True,i=3)
#
# bracket_descent_test([10.,10.],display=True,compare=True,i=1)
# bracket_descent_test([5.,5.],display=True,compare=True,i=2)
# bracket_descent_test([2.,2.],display=True,compare=True,i=3)
performance(10**(-6))
|
[
"rdt13@ic.ac.uk"
] |
rdt13@ic.ac.uk
|
45371882e8a70294d4262634c9fd50c2d4c24f59
|
5da77c20e1f3cbec5f5f3320549d3982a4722fb9
|
/baselines/netmf.py
|
e25cb4344e800f645cf38e1d4d3fe6718fb48e1b
|
[
"MIT"
] |
permissive
|
samihaija/tf-fsvd
|
02f6f0237a0e0cfe08f07943909d3ee14f3ea350
|
677cad8cfa21668369ce39c515874dabfbc021d5
|
refs/heads/main
| 2023-03-09T22:22:33.805652
| 2021-02-23T11:01:41
| 2021-02-23T11:01:41
| 339,504,112
| 16
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,247
|
py
|
# THIS IS MODIFIED FROM ORIGINAL BY SAMI TO RUN ASYM-PROJ DATASETS
# Original: https://github.com/xptree/NetMF/
# encoding: utf-8
# File Name: eigen.py
# Author: Jiezhong Qiu
# Create Time: 2017/07/13 16:05
# TODO:
import time
import scipy.io
import scipy.sparse as sparse
from scipy.sparse import csgraph
import numpy as np
import argparse
import logging
import theano
from theano import tensor as T
logger = logging.getLogger(__name__)
theano.config.exception_verbosity='high'
def load_adjacency_matrix(file, variable_name="network"):
if file.endswith('.mat'):
data = scipy.io.loadmat(file)
logger.info("loading mat file %s", file)
return data[variable_name]
elif file.endswith('.txt.npy'):
train_edges = np.load(file)
train_edges = np.concatenate([train_edges, train_edges[:, ::-1]], axis=0)
csr = scipy.sparse.csr_matrix((np.ones([len(train_edges)]), (train_edges[:, 0], train_edges[:, 1]) ))
return csr
def deepwalk_filter(evals, window):
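    # Closed-form spectral filter for a length-`window` DeepWalk context:
    #   f(x) = (1/T) * sum_{r=1..T} x^r = x * (1 - x^T) / ((1 - x) * T),
    # with the removable singularity at x = 1 handled explicitly (f(1) = 1).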
for i in range(len(evals)):
x = evals[i]
evals[i] = 1. if x >= 1 else x*(1-x**window) / (1-x) / window
evals = np.maximum(evals, 0)
logger.info("After filtering, max eigenvalue=%f, min eigenvalue=%f",
np.max(evals), np.min(evals))
return evals
def approximate_normalized_graph_laplacian(A, rank, which="LA"):
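    # Computes the top-`rank` eigenpairs of D^{-1/2} A D^{-1/2} = I - L_norm
    # ("LA" = largest algebraic eigenvalues) and returns them together with
    # D^{-1/2} U, the eigenvectors rescaled by the inverse square-root degrees.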
n = A.shape[0]
L, d_rt = csgraph.laplacian(A, normed=True, return_diag=True)
    # X = D^{-1/2} A D^{-1/2}, the symmetrically normalized adjacency matrix
X = sparse.identity(n) - L
logger.info("Eigen decomposition...")
#evals, evecs = sparse.linalg.eigsh(X, rank,
# which=which, tol=1e-3, maxiter=300)
evals, evecs = sparse.linalg.eigsh(X, rank, which=which)
logger.info("Maximum eigenvalue %f, minimum eigenvalue %f", np.max(evals), np.min(evals))
logger.info("Computing D^{-1/2}U..")
D_rt_inv = sparse.diags(d_rt ** -1)
D_rt_invU = D_rt_inv.dot(evecs)
return evals, D_rt_invU
def approximate_deepwalk_matrix(evals, D_rt_invU, window, vol, b):
evals = deepwalk_filter(evals, window=window)
X = sparse.diags(np.sqrt(evals)).dot(D_rt_invU.T).T
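    # X = D^{-1/2} U diag(sqrt(f(Lambda))), so X X^T = D^{-1/2} U f(Lambda) U^T D^{-1/2};
    # scaled by vol/b below, this is the low-rank approximation of the DeepWalk
    # matrix before the element-wise truncated log.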
m = T.matrix()
mmT = T.dot(m, m.T) * (vol/b)
f = theano.function([m], T.log(T.maximum(mmT, 1)))
Y = f(X.astype(theano.config.floatX))
logger.info("Computed DeepWalk matrix with %d non-zero elements",
np.count_nonzero(Y))
return sparse.csr_matrix(Y)
def svd_deepwalk_matrix(X, dim):
u, s, v = sparse.linalg.svds(X, dim, return_singular_vectors="u")
# return U \Sigma^{1/2}
return sparse.diags(np.sqrt(s)).dot(u.T).T
def netmf_large(args):
logger.info("Running NetMF for a large window size...")
logger.info("Window size is set to be %d", args.window)
# load adjacency matrix
A = load_adjacency_matrix(args.input,
variable_name=args.matfile_variable_name)
vol = float(A.sum())
# perform eigen-decomposition of D^{-1/2} A D^{-1/2}
# keep top #rank eigenpairs
evals, D_rt_invU = approximate_normalized_graph_laplacian(A, rank=args.rank, which="LA")
# approximate deepwalk matrix
deepwalk_matrix = approximate_deepwalk_matrix(evals, D_rt_invU,
window=args.window,
vol=vol, b=args.negative)
# factorize deepwalk matrix with SVD
deepwalk_embedding = svd_deepwalk_matrix(deepwalk_matrix, dim=args.dim)
logger.info("Save embedding to %s", args.output)
np.save(args.output, deepwalk_embedding, allow_pickle=False)
def direct_compute_deepwalk_matrix(A, window, b):
n = A.shape[0]
vol = float(A.sum())
L, d_rt = csgraph.laplacian(A, normed=True, return_diag=True)
# X = D^{-1/2} A D^{-1/2}
X = sparse.identity(n) - L
S = np.zeros_like(X)
X_power = sparse.identity(n)
for i in range(window):
logger.info("Compute matrix %d-th power", i+1)
X_power = X_power.dot(X)
S += X_power
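    # S now holds sum_{r=1..window} X^r; the NetMF closed form scales it by
    # vol / (window * b) before the degree renormalization below.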
S *= vol / window / b
D_rt_inv = sparse.diags(d_rt ** -1)
M = D_rt_inv.dot(D_rt_inv.dot(S).T)
m = T.matrix()
f = theano.function([m], T.log(T.maximum(m, 1)))
Y = f(M.todense().astype(theano.config.floatX))
return sparse.csr_matrix(Y)
def netmf_small(args):
logger.info("Running NetMF for a small window size...")
logger.info("Window size is set to be %d", args.window)
# load adjacency matrix
A = load_adjacency_matrix(args.input,
variable_name=args.matfile_variable_name)
# directly compute deepwalk matrix
deepwalk_matrix = direct_compute_deepwalk_matrix(A,
window=args.window, b=args.negative)
# factorize deepwalk matrix with SVD
deepwalk_embedding = svd_deepwalk_matrix(deepwalk_matrix, dim=args.dim)
logger.info("Save embedding to %s", args.output)
np.save(args.output, deepwalk_embedding, allow_pickle=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, required=True,
help=".mat input file path")
parser.add_argument('--matfile-variable-name', default='network',
help='variable name of adjacency matrix inside a .mat file.')
parser.add_argument("--output", type=str, required=True,
help="embedding output file path")
parser.add_argument("--rank", default=256, type=int,
help="#eigenpairs used to approximate normalized graph laplacian.")
parser.add_argument("--dim", default=128, type=int,
help="dimension of embedding")
parser.add_argument("--window", default=10,
type=int, help="context window size")
parser.add_argument("--negative", default=1.0, type=float,
help="negative sampling")
parser.add_argument('--large', dest="large", action="store_true",
help="using netmf for large window size")
parser.add_argument('--small', dest="large", action="store_false",
help="using netmf for small window size")
parser.set_defaults(large=True)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s') # include timestamp
now = time.time()
if args.large:
netmf_large(args)
else:
netmf_small(args)
train_time = time.time() - now
print('train time: %g' % train_time)
|
[
"samihaija@yahoo.com"
] |
samihaija@yahoo.com
|
ded166bd201ae177beb2bcf2cf992a42f026e48f
|
4f4ec663b3acfc22ac8ae7336dec18d28a691b9f
|
/image_Scrap.py
|
f9e56fe16761c469d84b9ad2fee9b458e5e60dec
|
[] |
no_license
|
lakshsharma07/Influencer-predictor
|
ff347f9db87acd1603555a5aba1b88202119397b
|
e320481ccdebdf0cc31d8eacba7f7069c6eac076
|
refs/heads/master
| 2020-03-25T20:30:10.603398
| 2019-06-26T17:54:29
| 2019-06-26T17:54:29
| 144,133,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# Scrape and display a Twitter user's avatar from the (legacy) server-rendered
# profile page.
from io import BytesIO

import requests
from bs4 import BeautifulSoup
from PIL import Image

handle = input('Input your account name on Twitter: ')  # raw_input in the original Python 2 version
temp = requests.get('https://twitter.com/' + handle)
bs = BeautifulSoup(temp.text, 'lxml')
# Note the trailing space in the class name; it matches the page's markup.
pic = bs.find_all("img", {"class": "ProfileAvatar-image "})
url = pic[0]['src']
response = requests.get(url)
img = Image.open(BytesIO(response.content))
img.show()
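# Optional follow-up (an assumption, not in the original script): persist the
# avatar locally for later processing; the filename is illustrative.
img.save(handle + '_avatar.png')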
|
[
"sharma1997lak@gmail.com"
] |
sharma1997lak@gmail.com
|
26a85bf9be92af4ff2847a5f6ffb6fc857174409
|
4abbdd9cbe83d28e900e22666a077337e89b741a
|
/smallspider/spider_28_asyncio.py
|
e88a8b7960e4a684dae254a69d7c104b4fd5cb85
|
[] |
no_license
|
Pysuper/small_spider
|
4f0394a3853d9803d27f9dd86f570398ad9a5af5
|
33e38424b93bb76b58862d7a9bd82741a680f599
|
refs/heads/master
| 2022-12-11T17:14:52.509160
| 2020-09-24T13:37:34
| 2020-09-24T13:37:34
| 184,028,706
| 2
| 1
| null | 2022-07-29T22:36:03
| 2019-04-29T08:13:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,682
|
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Zheng Xingtao
# File : spider_28_asyncio
# Datetime : 2020/8/21 9:19 AM
import asyncio
import aiohttp
from lxml import etree
from pprint import pprint
loop = asyncio.get_event_loop()
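
# Three-stage fire-and-forget pipeline: get_category schedules a parse_detail
# task for every photo category, and parse_detail schedules a download_img task
# for every work item, all sharing a single aiohttp session and this event loop.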
async def get_category():
async with aiohttp.ClientSession() as session:
while True:
try:
async with session.get("http://www.nipic.com/photo") as response:
html = await response.text()
x_html = etree.HTML(html)
category_href_list = x_html.xpath("//div[@class='menu-box-bd']/dl/dd/a/@href")
category_title_list = x_html.xpath("//div[@class='menu-box-bd']/dl/dd/a/text()")
for category_href, category_title in zip(category_href_list, category_title_list):
category_task = {
'category_title': category_title,
'category_href': "http://www.nipic.com" + category_href,
}
loop.create_task(parse_detail(session, category_task))
except Exception as e:
print(e)
await asyncio.sleep(5)
async def parse_detail(session, task):
try:
async with session.get(task["category_href"]) as response:
html = await response.text()
x_html = etree.HTML(html)
detail_href_list = x_html.xpath("//div[@class='mainV2']//li/a/@href")
detail_title_list = x_html.xpath("//div[@class='mainV2']//li/a/@title")
for detail_href, detail_title in zip(detail_href_list, detail_title_list):
detail_task = {
'category_href': task["category_href"],
'category_title': task["category_title"],
'detail_href': detail_href,
'detail_title': detail_title
}
loop.create_task(download_img(session, detail_task))
except Exception as e:
print(e)
await asyncio.sleep(5)
async def download_img(session, detail_task):
try:
async with session.get(detail_task["detail_href"]) as response:
html = await response.text()
x_html = etree.HTML(html)
img_href = x_html.xpath("//img[@class='works-img']/@src")[0]
pprint(detail_task)
print(img_href)
except Exception as e:
print(e)
await asyncio.sleep(5)
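
# Note: download_img only logs the final image URL. A minimal extension (an
# assumption, not in the original) would fetch img_href inside the try block,
# e.g. `async with session.get(img_href) as r: data = await r.read()`, and
# write the bytes to disk.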
# python 3.7
# asyncio.run(get_category())
# python 3.5
loop.run_until_complete(get_category())
|
[
"zxt@yj0407"
] |
zxt@yj0407
|
bec248fcae9a6598aed74b05d28f8eb2b0b7c95e
|
6ec4b51f7298b2daf4211518a6146ee4b0746a1c
|
/products/migrations/0002_auto_20210718_1356.py
|
fd0dd0b60e25eb0b99901f2e644ee64c426a43f2
|
[] |
no_license
|
Code-Institute-Submissions/proper_clobber
|
d2f88a495527ffb28eb999c9380ff2e24567891d
|
574297bfb669c71f4d2bd16c122ee547f0261c9b
|
refs/heads/master
| 2023-08-19T07:32:52.652181
| 2021-09-18T20:33:20
| 2021-09-18T20:33:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# Generated by Django 3.2.4 on 2021-07-18 13:56
from django.db import migrations
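
# Renames Product.has_clothing_size to has_sizing and removes has_footwear_size,
# presumably folding clothing and footwear sizing into a single flag.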
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='has_clothing_size',
new_name='has_sizing',
),
migrations.RemoveField(
model_name='product',
name='has_footwear_size',
),
]
|
[
"franky.p@hotmail.co.uk"
] |
franky.p@hotmail.co.uk
|