blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8f98e67156bd92bf6e4c796ae0c76000bd169bcc
|
89f1364334950ceec7eb82e79dfc1146b770a46f
|
/sync_report/migrations/0004_last_sync_date_id_class_name.py
|
e8f5866633366e35145445a08b5d817d3383145d
|
[] |
no_license
|
swapnil106111/Nalanda_Dashboard
|
3b1fec0d42c0cb6cfde8c52c0081cc6ba8e7b705
|
2912f0d64b911084f8e104011afe25c185bc1613
|
refs/heads/master
| 2020-03-27T21:38:44.811323
| 2019-02-27T09:28:29
| 2019-02-27T09:28:29
| 147,163,695
| 0
| 0
| null | 2018-09-03T06:53:37
| 2018-09-03T06:53:37
| null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-02-12 10:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``id_class_name`` column to the ``last_sync_date`` model.

    Auto-generated by Django; applied migration files should not be
    edited by hand.
    """

    dependencies = [
        ('sync_report', '0003_auto_20190212_0758'),
    ]

    operations = [
        migrations.AddField(
            model_name='last_sync_date',
            name='id_class_name',
            # NOTE(review): default=123 is an int for a CharField — Django
            # coerces it when back-filling existing rows. preserve_default=False
            # means the default exists only for this migration, not the model.
            field=models.CharField(default=123, max_length=200),
            preserve_default=False,
        ),
    ]
|
[
"swap106111@gmail.com"
] |
swap106111@gmail.com
|
aefc689aff365cd6cd4cb7505006a44022974310
|
a147fa3a2d1aec2e1a7ef3b8b675197ff38a5f4c
|
/expensefixer.py
|
7ba844968efa01cfca219d69255824d5d84146ea
|
[] |
no_license
|
dworin/FreshbooksExpenseFixer
|
e700febf0fd714733158cd64d3693735671163b8
|
831828c6f42d1227449fcca23f7251887746018d
|
refs/heads/master
| 2021-01-16T21:23:52.198639
| 2013-08-16T19:23:19
| 2013-08-16T19:23:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
__author__ = 'David Dworin'

from refreshbooks import api

# Legacy Python 2 script: walks every Freshbooks expense and, when the
# Vendor field is blank, copies the (stripped) Notes text into it.

# Token-authenticated Freshbooks client; the placeholders must be replaced
# with real credentials before running.
c = api.TokenClient(
    'YOUR_SITE_ADDRESS.freshbooks.com',
    'YOUR_API_TOKEN',
    user_agent='DavidsChaseExpenseFixer/1.0'
)

# This first call is only used to read the total page count from the
# response attributes (the page='100' value appears arbitrary — TODO confirm).
expenses = c.expense.list(page='100')

print "There are %s pages of expenses." % (
    expenses.expenses.attrib['pages'],
)

# Iterate every page; pages are 1-indexed on the Freshbooks API.
for i in range(1,int(expenses.expenses.attrib['pages'])+1):
    expenses = c.expense.list(page=str(i))
    for myexpense in expenses.expenses.expense:
        print "Expense(%s) Notes: %s Vendor: %s" % (
            myexpense.expense_id,
            str(myexpense.notes).strip(),
            myexpense.vendor
        )
        # Blank vendor -> fill it from the notes text via an update call.
        if(myexpense.vendor==''):
            print "Fixing the Vendor on Expense %s" % (myexpense.expense_id)
            response = c.expense.update(
                expense=dict(
                    expense_id=myexpense.expense_id,
                    vendor=str(myexpense.notes).strip()
                )
            )
print "All Done!"
|
[
"dworin@gmail.com"
] |
dworin@gmail.com
|
46db0d8f9e71286c4f3ed5f14b4ad29ae4029cba
|
99052370591eadf44264dbe09022d4aa5cd9687d
|
/install/lib/python2.7/dist-packages/cwru_action/msg/_cart_moveActionFeedback.py
|
d2eda06fd03c8ca0980303f9c1bb86a9a0d5a72c
|
[] |
no_license
|
brucemingxinliu/ros_ws
|
11b1a3e142132925d35b3adf929f1000392c5bdc
|
45f7e553ea20b79e3e93af5f77a1b14b64184875
|
refs/heads/master
| 2021-01-24T03:36:47.043040
| 2018-02-26T00:53:37
| 2018-02-26T00:53:37
| 122,892,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,445
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from cwru_action/cart_moveActionFeedback.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import genpy
import actionlib_msgs.msg
import cwru_action.msg
import std_msgs.msg
class cart_moveActionFeedback(genpy.Message):
  """Auto-generated genpy wrapper for cwru_action/cart_moveActionFeedback.

  Marked 'Do not edit' by the generator: the (de)serialization code below
  depends on the exact wire layout of header/status/feedback.
  """
  _md5sum = "5a6929cb3514eabf286ff2717ef7a5a4"
  _type = "cwru_action/cart_moveActionFeedback"
  _has_header = True #flag to mark the presence of a Header object
  # Runtime message-definition text; part of the ROS message contract, do not alter.
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
cart_moveFeedback feedback
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: cwru_action/cart_moveFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#feedback: optional;
int32 fdbk
"""
  __slots__ = ['header','status','feedback']
  _slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','cwru_action/cart_moveFeedback']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       header,status,feedback
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(cart_moveActionFeedback, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.feedback is None:
        self.feedback = cwru_action.msg.cart_moveFeedback()
    else:
      self.header = std_msgs.msg.Header()
      self.status = actionlib_msgs.msg.GoalStatus()
      self.feedback = cwru_action.msg.cart_moveFeedback()

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      # NOTE: `unicode` only exists on Python 2; the python3 flag short-circuits
      # the check there, so this line is safe on both interpreters.
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_B.pack(self.status.status))
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_i.pack(self.feedback.fdbk))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``

    (Note: the parameter deliberately shadows the builtin ``str`` — generator
    convention; do not rename.)
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.feedback is None:
        self.feedback = cwru_action.msg.cart_moveFeedback()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.status.status,) = _struct_B.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      start = end
      end += 4
      (self.feedback.fdbk,) = _struct_i.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_B.pack(self.status.status))
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_i.pack(self.feedback.fdbk))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.feedback is None:
        self.feedback = cwru_action.msg.cart_moveFeedback()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.status.status,) = _struct_B.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      start = end
      end += 4
      (self.feedback.fdbk,) = _struct_i.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled struct packers shared by the (de)serialization methods above.
_struct_I = genpy.struct_I
_struct_i = struct.Struct("<i")   # little-endian int32: feedback.fdbk
_struct_3I = struct.Struct("<3I") # header.seq + stamp secs/nsecs
_struct_B = struct.Struct("<B")   # GoalStatus.status byte
_struct_2I = struct.Struct("<2I") # goal_id stamp secs/nsecs
|
[
"mxl592@case.edu"
] |
mxl592@case.edu
|
fb8dfefe0dc4c55e2506a37d3f162f3a82d9fd0a
|
963f79e9bfa46cd472d2ded512107e446694f590
|
/proginn_ping/proginn_ping.py
|
c2590372f603ff2bb820adf076263fcafdf4282c
|
[
"MIT"
] |
permissive
|
bluehole333/spider
|
3081fe42479fc4d66fbcaf3d6cc34b22c4c21a0b
|
0540b7495306224b1e54ba1609718fa1b4d0eb6a
|
refs/heads/master
| 2020-07-28T20:12:23.011446
| 2019-09-29T11:10:58
| 2019-09-29T11:10:58
| 209,523,078
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,380
|
py
|
# -*- coding: utf-8 -*-
"""
程序员客栈 自动ping
Python3.6
"""
import os
import yaml
import time
import requests
import memcache
from selenium import webdriver
from bs4 import BeautifulSoup
from requests.exceptions import *
from selenium.webdriver.common.proxy import Proxy, ProxyType
LOGIN_URL = 'https://www.proginn.com'
PING_URL = 'https://www.proginn.com/wo/work_todo'
class ProxySpider(object):
    """Scrape free HTTP(S) proxies and keep the working ones in memcached."""

    def __init__(self):
        """Set up the memcached-backed proxy cache."""
        self.cache_key = "PROXYSPIDERX"
        self.cache = memcache.Client(['127.0.0.1:11211'], debug=True)
        # The cache is only considered usable when the daemon answers stats.
        self.hash_cache = bool(self.cache.get_stats())

    def test_proxy(self, proxy):
        """Probe the site through *proxy*; True when it answers HTTP 200."""
        scheme_map = {"http": "http://%(ip)s:%(port)s" % proxy}
        if 'HTTPS' in proxy['proxy_type']:
            scheme_map["https"] = "https://%(ip)s:%(port)s" % proxy
        # Hit the site's front page to verify the proxy actually relays.
        try:
            response = requests.get(LOGIN_URL, proxies=scheme_map, timeout=3)
        except (ConnectTimeout, ProxyError, ReadTimeout):
            return False
        if response.status_code == 200:
            return True

    def spider_proxy_ip(self):
        """Scrape kxdaili.com, validate each candidate, cache the survivors."""
        print("爬取代理网站...")
        working = []
        session = requests.Session()
        session.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
            'Host': 'www.kxdaili.com',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Connection': 'keep-alive'
        }
        page = session.get("http://www.kxdaili.com/dailiip/1/1.html").text
        soup = BeautifulSoup(page, 'lxml')
        for row in soup.find_all('tr'):
            cells = row.find_all('td')
            if not cells:
                continue
            candidate = {
                'ip': cells[0].text,
                'port': cells[1].text,
                'proxy_type': cells[3].text,
            }
            if self.test_proxy(candidate):
                working.append(candidate)
        if self.hash_cache:
            # Cache the validated list for a day.
            self.cache.set(self.cache_key, working, 3600 * 24)
        print("共爬取%s有效代理..." % len(working))
        return working

    @property
    def proxy(self):
        """Cached proxy list when memcached is up, else a fresh scrape."""
        if self.hash_cache:
            return self.cache.get(self.cache_key) or self.spider_proxy_ip()
        return self.spider_proxy_ip()
class ProginnPing(object):
    """Drive a headless Firefox through the proginn.com login and click 'ping'."""

    def __init__(self, user_name, passwd):
        """
        Store the login credentials.
        """
        # Username (phone number) and password for the login form.
        self.user_name = user_name
        self.passwd = passwd

    def wait_input(self, ele, text):
        # Type one character at a time to mimic a human typist.
        for item in text:
            ele.send_keys(item)
            time.sleep(0.5)

    def get_proxy(self):
        # Use the first proxy scraped/cached by ProxySpider.
        proxy_spider_dict = ProxySpider().proxy[0]
        proxy = Proxy()
        proxy.proxy_type = ProxyType.MANUAL
        proxy.http_proxy = "%(ip)s:%(port)s" % proxy_spider_dict
        if 'HTTPS' in proxy_spider_dict.get('proxy_type'):
            proxy.ssl_proxy = "%(ip)s:%(port)s" % proxy_spider_dict
        print("使用代理", proxy_spider_dict)
        return proxy

    def ping(self):
        # Remote invocation via docker (kept for reference):
        # option = webdriver.Remote(
        #     command_executor="http://chrome:4444/wd/hub",
        #     desired_capabilities=DesiredCapabilities.CHROME
        # )
        firefox_options = webdriver.FirefoxOptions()
        proxy = self.get_proxy()
        desired_capabilities = webdriver.DesiredCapabilities.FIREFOX
        proxy.add_to_capabilities(desired_capabilities)
        print("开始...")
        # Run Firefox headless (no GUI).
        firefox_options.add_argument("--headless")
        firefox_options.add_argument('--no-sandbox')
        firefox_options.add_argument('--incognito')
        firefox_options.add_argument('--start-maximized')
        option = webdriver.Firefox(firefox_options=firefox_options, desired_capabilities=desired_capabilities,
                                   timeout=10)
        option.maximize_window()
        print("打开登录页面")
        option.get(LOGIN_URL)
        option.implicitly_wait(3)
        print("打开登录悬浮框")
        # 1. Open the login pop-up.
        links = option.find_element_by_xpath('//a[@class="item login ajax_login_btn"]')
        links.click()
        # 2. Switch to the phone-number login tab.
        option.find_element_by_id('J_ChangeWay').click()
        # 3. Type the user name.
        user_name = option.find_element_by_xpath('//input[@placeholder="您的手机号"]')
        print('正在输入账号.....')
        self.wait_input(user_name, self.user_name)
        time.sleep(1)
        # 4. Type the password.
        upass = option.find_element_by_id('password')
        print('正在输入密码....')
        self.wait_input(upass, self.passwd)
        time.sleep(1)
        # 5. Click the login button.
        butten = option.find_element_by_id('login_submit')
        time.sleep(1)
        butten.click()
        # 6. After login succeeds, navigate to the ping page.
        print('正在跳转ping页面....')
        option.get(PING_URL)
        # Implicit wait of up to 3 seconds for elements to appear.
        option.implicitly_wait(3)
        # 7. Click the ping control.
        option.find_element_by_xpath('//span[@data-position="bottom right"]').click()
        print("点击ping成功")
        return True
def go():
    """Read credentials from ``pro.yaml`` in the current directory and run
    the ping flow.

    Returns:
        bool: False when the config file is missing or incomplete,
        True after the ping flow has been started.
    """
    file_name = 'pro.yaml'
    error = "请添加pro.yaml文件到当前目录,并添加以下内容到yaml文件中:\nusername: 用户名 \npassword: 登录密码"
    if not os.path.exists(file_name):
        print(error)
        return False
    # Context manager replaces the manual open()/close() pair and no longer
    # shadows the builtin name ``file``.
    with open(file_name, 'r', encoding="utf-8") as config_file:
        # `or {}` guards against an empty YAML file, where yaml.load
        # returns None and the .get calls below would crash.
        user_info = yaml.load(config_file.read(), Loader=yaml.SafeLoader) or {}
    if not user_info.get('username') or not user_info.get('password'):
        print(error)
        return False
    ProginnPing(str(user_info['username']), str(user_info['password'])).ping()
    return True


if __name__ == '__main__':
    go()
|
[
"bluehole333@foxmail.com"
] |
bluehole333@foxmail.com
|
94036e1a9b4aa7e367668e2e3b058c27028e0ebe
|
2a8b00122bd6842b0ad0e8408ff959ad09918faa
|
/pybcm/deprecated/bcmdata.py
|
217c370efbea25d0d3eb67983027487cbb688763
|
[] |
no_license
|
Geekly/pybcm
|
3ee1d1d397c43e3fbb3e29053866ac4d0ab72a9e
|
610cd96f210cc3451834409b5d5f50448d812152
|
refs/heads/master
| 2023-06-09T21:12:32.729983
| 2023-06-02T11:35:21
| 2023-06-02T11:35:21
| 8,487,052
| 1
| 0
| null | 2022-12-07T23:59:24
| 2013-02-28T20:09:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 19,572
|
py
|
"""
Created on Oct 30, 2012
@author: khooks
"""
# from numpy import *
import logging
from collections import defaultdict
from operator import itemgetter
import numpy as np
import numpy.ma as ma
from vendors import VendorMap
class BCMData():
    """ Contains the core data of pybcm.

    Holds the active element/vendor lists plus masked numpy arrays of
    wanted quantities, stock, and prices (prices stored as integer cents).
    Matrix rows follow ``elementList``; columns follow ``vendorList``.

    Public methods:
        vendorsortdict(): dictionary that defines vendor sorting
        removevendor(vendorid): remove one vendor from the vendor list
        removevendors(vendorindices): remove a list of vendor indices
        replacevendorlist(newvendors): swap in a new vendor list
        avgprices(): masked array of average element prices
        elementweights() / partcount()
    """

    def __init__(self, bcmDict, wantedDict, elemDict, venDict):
        """Build the element/vendor lists and the stock/price/wanted arrays.

        Args:
            bcmDict: dict keyed by (elementid, vendorid) -> (qty, price)
            wantedDict: dict elementid -> wanted qty
            elemDict: element-wise view of bcmDict
            venDict: vendor-wise view of bcmDict
        """
        self.vendorList = list()   # active vendors; matrix columns follow this order
        self.elementList = list()  # elements; matrix rows follow this order
        self.wanted = None         # numpy array of wanted quantities
        self.prices = None         # masked numpy array, integer cents
        self.stock = None          # masked numpy array, clipped to wanted qty
        self.bcmDict = bcmDict
        self.__initialize_lists(self.bcmDict)
        self.wantDict = wantedDict
        self.elemDict = elemDict
        self.venDict = venDict
        self.wanted = self.__buildwantedarray(self.wantDict)
        self.stock, self.prices = self.__buildVendorArrays()
        # NOTE(review): VendorStats is neither defined nor imported in this
        # module — presumably supplied elsewhere in the package; confirm.
        self.__vs = VendorStats(self)
        self.__update()
        self.__need_rebuild = False

    def __update(self):
        """Re-sort the lists, rebuild the arrays and refresh vendor stats."""
        self.__sortlists()
        # BUG FIX: this previously called self.__updatearrays() while the
        # method was spelled __updateArrays, raising AttributeError the
        # first time __update ran (i.e. during __init__).
        self.__updatearrays()
        self.__vs.update(self)

    def __initialize_lists(self, bcmdict):
        """ Build the initial elementList and vendorList from bcmdict keys."""
        logging.info("Building BCMData lists")
        for keytuple in list(bcmdict.keys()):
            (elementid, vendorid) = keytuple
            addtolist(self.vendorList, vendorid)
            addtolist(self.elementList, elementid)

    def __updatearrays(self):
        """Rebuild the wanted/stock/price arrays (renamed from __updateArrays
        so the call in __update resolves)."""
        logging.info("Forcing array update...")
        self.wanted = self.__buildwantedarray(self.wantDict)
        self.__updatevendorarrays()
        self.__need_rebuild = False

    def __buildVendorArrays(self):
        """ Iterate over elementList x vendorList and build masked stock and
        price arrays from bcmDict. Stock is clipped to the wanted quantity;
        zero-stock cells are masked in both arrays.
        """
        shape = (len(self.elementList), len(self.vendorList))
        priceArray = ma.masked_array(np.zeros(shape, dtype='int'))
        stockArray = ma.masked_array(np.zeros(shape, dtype='int'))
        # wanted doesn't change here
        for eindex, element in enumerate(self.elementList):
            for vindex, vendor in enumerate(self.vendorList):
                if (element, vendor) in self.bcmDict:
                    stockArray[eindex, vindex] = int(self.bcmDict[element, vendor][0])
                    # store cents so the arrays stay integer-typed
                    priceArray[eindex, vindex] = int(self.bcmDict[element, vendor][1] * 100)
        # clip the max value of stock to the wanted quantity
        stockArray = np.minimum(stockArray, self.wanted.reshape(len(self.elementList), 1))
        mask = stockArray <= 0
        stockArray.mask = mask
        priceArray.mask = mask
        return stockArray, priceArray

    def __updatevendorarrays(self):
        """ Create new arrays in case elementList and vendorList changed size."""
        stockarray, pricearray = self.__buildVendorArrays()
        self.prices = pricearray
        self.stock = stockarray
        return stockarray, pricearray

    def __buildwantedarray(self, wantedDict):
        """ Create a numpy array of wanted quantities aligned to elementList."""
        logging.info("Building WANTED array...")
        m = len(self.elementList)  # keep the array size consistent with the others
        # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
        # documented replacement. np.empty replaces the raw ndarray
        # constructor (every cell is filled in the loop below).
        wantedArray = np.empty(shape=(m,), dtype=int)
        for eidx, elementId in enumerate(self.elementList):
            wantedArray[eidx] = wantedDict[elementId]
        return wantedArray

    def __sortlists(self):
        """Sort elements by weight, then vendors by stocking stats."""
        self.__elementsort()
        self.__vendorsort()

    def __elementsort(self, sortweights=None):
        """Sort elementList descending by weight (avg price * wanted qty)."""
        logging.info("Sorting Element List...")
        if sortweights:
            weights = sortweights
        else:
            weights = self.elementweights()
        # resort the elementlist using these weights
        self.elementList = [y for (x, y) in sorted(zip(weights, self.elementList), reverse=True)]

    def __vendorsort(self, sortby='unique_items'):
        """Sort vendorList descending by a VendorStats metric."""
        logging.info("Sorting Vendor List...")
        if sortby == 'unique_items':
            weights = self.__vs.itemspervendor
        elif sortby == 'total_items':
            weights = self.__vs.totalvendor
        else:
            return  # unknown key: leave the list untouched
        self.vendorList = [y for (x, y) in sorted(zip(weights, self.vendorList), reverse=True)]

    def __createvendsortinglist(self):
        """ Return list of tuples (vendor index, element index, price, stock).

        The element index is the highest-weight element the vendor offers in
        sufficient quantity (stock > wanted * factor).
        """
        factor = 1.0
        k = list()
        for vidx, vcol in enumerate(self.stock.T):
            for eidx, stock in enumerate(vcol):
                if stock > self.wanted[eidx] * factor:
                    price = self.prices[eidx, vidx]
                    stock = self.stock[eidx, vidx]
                    k.append((vidx, eidx, price, stock))
                    break
        # sort on eidx ascending, then price
        k = sorted(k, key=itemgetter(1, 2))
        return k

    def vendorsortdict(self):
        """Return a dictionary describing how to sort vendors:
        k[vendor index] = (element index to sort on, price, qty).

        Important: assumes the arrays are already sorted on element weights
        (most costly elements first); uses the first element each vendor
        stocks at a positive price.
        """
        k = dict()
        for vidx, vcol in enumerate(self.prices.T):  # columns of the price matrix
            for eidx, price in enumerate(vcol):
                if price > 0:
                    qty = self.stock[eidx, vidx]
                    k[vidx] = (eidx, price, qty)
                    break
        return k

    def __sufficientqty(self, eidx, vidx, factor=0.5):
        """True when the vendor stocks at least factor * wanted of the element."""
        return self.stock[eidx, vidx] >= self.wanted[eidx] * factor

    def removevendor(self, vendorid):
        """Remove one vendor id from the active list (data dicts untouched)."""
        assert vendorid in self.vendorList, "Vendor %r does not exist in vendorlist" % vendorid
        self.vendorList.remove(vendorid)
        self.__need_rebuild = True

    def removevendors(self, vendorindices):
        """Drop every vendor whose index appears in vendorindices; returns the
        new vendor list."""
        logging.info("Trying to remove " + str(len(vendorindices)) + " vendors.")
        before = len(self.vendorList)
        newlist = [vendor for vendor in self.vendorList if self.vendorList.index(vendor) not in vendorindices]
        self.replacevendorlist(newlist)
        after = len(self.vendorList)
        logging.info("Removed: " + str(before - after) + " vendors.")
        self.__update()
        return newlist

    def replacevendorlist(self, newvendors):
        """Swap in a new vendor list and rebuild all derived state."""
        self.vendorList = newvendors
        self.__update()

    def avgprices(self, stockweighted=False):
        """Return a masked array of the average price by element; optionally
        weighted by stock on hand."""
        p = ma.array(self.prices, mask=self.prices <= 0)
        if stockweighted:
            s = ma.array(self.stock, mask=self.stock <= 0)
            avgprices = ma.average(p, weights=s, axis=1)
        else:
            avgprices = ma.average(p, axis=1)
        return avgprices

    def elementweights(self):
        """Weight per element: wanted quantity * average price."""
        weights = self.wanted * self.avgprices()
        return weights

    def __itemspervendor(self):
        """Count, per vendor, how many distinct elements are in stock."""
        s = self.stock
        # BUG FIX: np.ndarray(s > 0) passed a boolean array to the raw ndarray
        # constructor (a shape argument) and raised; the intent is a
        # column-wise count of positive-stock entries.
        itemspervendor = (s > 0).sum(0)
        return itemspervendor

    def partcount(self):
        """Return (number of unique parts, total number of parts wanted)."""
        return len(self.wanted), self.wanted.sum()
class BCMManager(object):
""" Provides encapsulation of a BCMData object
contains a dictionary that allows access via data[elementid, vendorid] = (price, qty)
"""
def __init__(self, bricklink, wanteddict):
if not isinstance(bricklink.vendor_map, VendorMap):
raise Exception("vendor_map doesn't exist.")
self.bcmdict = self.__createbcmdict(bricklink)
# TODO: check dictionaries for validity prior to creating the BCMData object
# self.data[elementid, vendorid] = (price, qty) #essentially a copy of the Bricklink data with a different lookup.
# Don't change this once initialized
self.data = BCMData(self.__createbcmdict(bricklink),
self.__createwanteddict(bricklink, wanteddict),
self.__createelementdict(self.bcmdict),
self.__createvendict(self.bcmdict))
self.initialized = False
# overload the default _get function. If the key combo doesn't exist, return a 0,0 pair
@staticmethod
def __createwanteddict(bricklink, wanteddict):
logging.info("Building Wanted Dictionary")
wanted = dict()
for elementid in list(bricklink.keys()): # bricklink.data only has one key - the elementid
wanted[elementid] = wanteddict[elementid].wantedqty # populate the WANTED qty dictionary
return wanted
def __createbcmdict(self, bricklink):
"""Creates a dictionary keyed to the vendor and an element that contains the qty
and price for each vendor/element pair
"""
logging.info("Building soln(Element,Vendor) Dictionary")
bcm = dict()
#create the price array
#create the STOCK array
for elementid in list(bricklink.keys()): # bricklink.data only has one key - the elementid
# iterate over the list of vendors in bricklink[elementid] = (vendorid, qty, price)
for vendorinfo in bricklink[elementid]:
vendorid = str(vendorinfo[0])
vendorqty = int(vendorinfo[1])
vendorprice = float(vendorinfo[2])
bcm[elementid, vendorid] = (vendorqty, vendorprice)
self.initialized = True
return bcm
@staticmethod
def __createelementdict(bcmdict):
logging.info("Building Element-wise Dictionary")
elementdict = defaultdict(list)
for keys, values in list(bcmdict.items()):
element, vendor = keys
qty, price = values
elementdict[element].append((vendor, qty, price))
for element, plist in list(elementdict.items()):
#sort the list price
sortedlist = sorted(plist, key=itemgetter(2))
elementdict[element] = sortedlist # reassign the sorted list instead
#print sortedlist
return elementdict
@staticmethod
def __createvendict(bcmdict):
logging.info("Building Vendor-wise Dictionary")
vendict = defaultdict(list)
for keys, values in list(bcmdict.items()):
element, vendor = keys
qty, price = values
vendict[vendor].append((element, qty, price))
return vendict
def presolve(self):
"""Prunes the data before looking for a solution.
"""
self.data.replacevendorlist(self.prunedvendorsbyavgprice())
@staticmethod
def describesolution(result):
if result:
pass
def describevendors(self):
#print out some information about the vendors
print("There are " + str(len(self.vendorlist)) + " in Vendorlist")
def getqtyandprice(self, elementid, vendorid):
assert ((elementid, vendorid) in list(self.bcmdict.keys())), "ElementID %r, VendorID %r not found" % (
elementid, vendorid)
(qty, price) = self.bcmdict[elementid, vendorid]
return qty, price
def getvendorlist(self):
return self.data.vendorList
def getelementlist(self):
return self.data.elementList
#prune vendors that are above average in price
def prunedvendorsbyavgprice(self, pricefactor=1.0):
"""Remove vendor id from the active vendorlist based on relative vendor pricing
Args:
pricefactor, optional: remove vendor if pricing is > pricefactor*average pricing
"""
#prune the vendors that are more greater than pricefactor * average (0.5 keeps average and cheaper)
logging.info("Removing vendors with above-average pricing")
data = self.data
removethese = list()
p = self.data.prices
avgprices = data.avgprices() #same indices as elementlist
for element, vendor in list(self.bcmdict.keys()):
eindex = data.elementList.index(element)
vindex = data.vendorList.index(vendor)
if p[eindex][vindex] > pricefactor * avgprices[eindex]:
if vindex not in removethese:
removethese.append(vindex)
newlist = data.removevendors(removethese)
#data.update()
return newlist
def vendorstats(self):
    """Return a dictionary with an entry containing stats for each vendor
    vdict[vendor] = (num of stocked components, price factor)
    """
    # TODO: make this work
    # NOTE(review): unimplemented — currently falls through and returns None.
def cullvendorsbyprice(self):
    # TODO: NOT COMPLETE
    # Intent: keep only the cheapest vendors and shrink data.vendorList.
    # NOTE(review): sortedvendoridx() returns a 2-D masked array, so
    # iterating `cheapvendoridx` yields rows, not integer indices — the
    # list comprehension below likely does not do what was intended;
    # confirm before relying on this method.
    cheapvendoridx = self.sortedvendoridx()
    #keep the n cheapest
    #make a new list containing only these vendors
    initial_length = len(self.data.vendorList)
    cheapvendors = [self.data.vendorList[i] for i in cheapvendoridx]
    self.data.replacevendorlist(cheapvendors)
    # self.data.__update()
    finallength = len(self.data.vendorList)
    removed = initial_length - finallength
    logging.info("Removed " + str(removed) + " vendors from the list")
    #print(cheapvendors)
def sortedvendoridx(self):
    """Return a masked array of per-element vendor indices sorted by price.

    Each row holds vendor indices ordered cheapest-first for one element;
    positions whose sorted price is <= 0.0 (vendor does not offer the
    element) are masked out.
    """
    prices = self.data.prices
    order = prices.argsort(axis=1)  # vendor indices, sorted per row
    row_idx = np.indices(prices.shape)[0]
    sorted_prices = prices[row_idx, order]  # prices rearranged into sorted order
    # Mask entries that correspond to non-offers (price <= 0.0).
    return ma.array(order, mask=(sorted_prices <= 0.0))
def addtolist(alist, value):
    """Append *value* to *alist* only if it is not already present.

    Args:
        alist: list to mutate in place.
        value: candidate item.

    Returns:
        bool: True if the value was appended, False if it was already there.
    """
    # Fix: the original returned True unconditionally, even when nothing
    # was added, which made the return value meaningless to callers.
    if value not in alist:
        alist.append(value)
        return True
    return False
# compressed, sorted, masked array of vendor indices
# def hasminquantity(self, elementid, vendorid ):
# assert vendorid in vendor_map, "Cannot determine qantity, vendor %r does not exist in vendorlist" % vendorid
# #assert vendorid in self.vendor_map, "Cannot determine qantity, vendor %r does not exist in vendorlist" % vendorid
#
# if (elementid, vendorid) in self.BCMDICT:
# wantedquantity = int(self.WANTED[elementid])
# return (self.BCMDICT[elementid, vendorid][0]) >= wantedquantity
# else:
# return False
# def cheapvendorsbyitem(self, nvendors):
# #keep the cheapest N vendors for each item
# #at most, this leaves us with NumElements x N vendors
# #use the pricearray and loop over vendor list
# #msorted = self.sortedvendoridx() #this is a list of vendor indices, sorted and masked > 0
# cheap = self.data.PRICES
# avg = self.data.AVGPRICES
# mask = ((cheap.T <= avg) & (cheap.T > 0.0)).T
#
# return cheap, mask
# def sortedvendorlists(self):
# # data: bcmdata opbject
# # assign a sorted list of vendor id's for each element
# # sev[elementid] = [ vendorid32, vendorid2, vendorid7, ...]
# e = self.data.elementlist
# v = self.data.vendorlist
#
# elementvendors = dict()
#
# for keys, values in self.BCMDICT.items():
# element, vendor = keys
# qty, price = values
#
#
# #priceidx = [ index for index, price in enumerate(self.data.PRICES[eindex]) if price > 0]
# #pairs = sorted( zip(priceidx, v), reverse = True )
# #vorder = [ vidy for (x, vidy) in pairs]
# #sort the list of vendors by element price
# #elementvendors[element] = vorder
# return elementvendors
#
# def sortedelementidx(self):
# #returns a lsit of the indices of self.elementlist sorted by weight (descending)
# elementweights = self.data.elementweights()
# elementindexlist = [index for index, elementid in enumerate(self.elementlist) ]
# pairs = sorted( zip(elementweights, elementindexlist), reverse = True ) # (weight, elementindex) tuples sorted on weight
# elementorder = [ eidy for (x, eidy) in pairs] #this is the order to search elements
# return elementorder
#
# def vendorweights(self):
# eleweights = self.data.elementweights()
#
#
# #for item in self.item
#
#
#
# def maparray2vendorid(self, array):
# d = dict()
# #width of array must be equal to length of vendorlist
# shape = array.shape
# if shape[1] == len(self.vendorlist):
# for index, col in enumerate(array.T):
# vendorid = self.vendorlist[index]
# d[vendorid] = col
#
# return d
#
# def rawshoppinglist(self, result):
# #for each vendor, item & quantity
# #converts result array from Opt into a vendorid, elementid dictionary
# rawshoppinglist = dict() #rawshoppinglist[vendorid, elementid] = qty
# if result.any():
# r = result
# for vindex, vendor in enumerate(r.T): #iterate over columns in result
# if any(val > 0 for val in vendor): #check if any values in column 'vendor' are greater than zero
# eindices = np.nonzero(vendor)
# for eindex in eindices[0]:
# #print (vendor, index)
# vendorid = self.data.vendorlist[vindex]
# elementid = self.data.elementlist[eindex]
# rawshoppinglist[vendorid, elementid] = r[eindex, vindex] #convert back to a column
# #print( shoppinglist)
#
# return rawshoppinglist
#
# else:
# print("No result set found")
# return False
#
# def printdata(self):
# for item in self.BCMDICT.items():
# print (item)
|
[
"khooks@gmail.com"
] |
khooks@gmail.com
|
b4ee1c0cb839bb0df1f176c4a10940a5cb20ecd8
|
8176417a4ec9c93dc2a0142a629de3e24c5935da
|
/migrations/versions/6ae2d6dded93_.py
|
2b55ee7339082f71a9ac52ad2ac454196e52d543
|
[] |
no_license
|
jamesinsheffield/1000Words
|
af6935034816de95d0b55a4b39d6ddb1af0130cc
|
b4e38a85721c62418a5efc3ed1f0d6577daedd63
|
refs/heads/master
| 2021-06-25T03:26:19.044594
| 2020-08-16T10:36:06
| 2020-08-16T10:36:06
| 194,422,698
| 0
| 0
| null | 2021-03-20T01:20:17
| 2019-06-29T15:50:45
|
Python
|
UTF-8
|
Python
| false
| false
| 876
|
py
|
"""empty message
Revision ID: 6ae2d6dded93
Revises:
Create Date: 2020-08-09 17:07:45.493167
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6ae2d6dded93'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    # Creates the 'words' vocabulary table: an integer PK plus a study
    # mark, a category label, and the Romanian/English word pair.
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('words',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('mark', sa.Integer(), nullable=True),
    sa.Column('category', sa.String(), nullable=True),
    sa.Column('romanian', sa.String(), nullable=True),
    sa.Column('english', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    # Reverses upgrade() by dropping the 'words' table.
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('words')
    # ### end Alembic commands ###
|
[
"jamesinsheffield@hotmail.com"
] |
jamesinsheffield@hotmail.com
|
ff99d46b0f0ebc6112fa72d36496b55f8858bd34
|
4ff66f5d34b602d286ccf7661ebf8ab451135433
|
/backend/app/__init__.py
|
7a9ec147868c31b1c8d0991197aba1b45e9dea95
|
[] |
no_license
|
Plawn/LostPass
|
ea96aa64bab63ba21059a5e46bf4112072b99659
|
3817a696e8c1c01ea151eb5d02870b736ba91be0
|
refs/heads/master
| 2023-07-20T06:38:20.655154
| 2022-06-01T14:40:54
| 2023-01-27T14:57:14
| 253,583,307
| 0
| 0
| null | 2023-07-18T21:36:11
| 2020-04-06T18:31:01
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 46
|
py
|
from . import ressources
from .api import app
|
[
"plawn.yay@juniorisep.com"
] |
plawn.yay@juniorisep.com
|
8ca75affe6937bc42958158b666799c2c3f053a4
|
a1a518ba04855820f531c705c36028e4d7435a86
|
/tests/python/unittest/test_tir_schedule_reduction.py
|
5f5daa144e966998594dc5a2db2784a8afa2cf20
|
[
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
ZQPei/tvm
|
b47f7a3f16400774eefb5ca882a0053e46176a52
|
6c32f976522aa1d923fcfe364f05a7860cb346b4
|
refs/heads/main
| 2021-12-10T22:33:44.248391
| 2021-11-30T23:58:05
| 2021-11-30T23:58:05
| 203,511,290
| 0
| 1
|
Apache-2.0
| 2021-11-25T09:28:32
| 2019-08-21T05:17:34
|
Python
|
UTF-8
|
Python
| false
| false
| 9,082
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
# Row-sum over the last axis of a (32, 4, 128) tensor with the reduction
# axis blockized: the outer block iterates (row, reduction-chunk) pairs,
# zero-initializes B inside T.init(), then accumulates 8-wide chunks.
# NOTE: this is a structural fixture compared via assert_structural_equal —
# do not alter any token of the TVMScript body.
@T.prim_func
def rowsum_blockized(a: T.handle, b: T.handle) -> None:
    B = T.match_buffer(b, [32, 4])
    A = T.match_buffer(a, [32, 4, 128])
    for i0, i2_0 in T.grid(32, 16):
        with T.block("blockized_B"):
            io, ko = T.axis.remap("SR", [i0, i2_0])
            with T.init():
                for i1 in T.serial(0, 4):
                    with T.block("B_init"):
                        ii_init = T.axis.S(4, i1)
                        B[io, ii_init] = 0.0
            for i1_1, i2_1 in T.grid(4, 8):
                with T.block("B"):
                    ii = T.axis.S(4, i1_1)
                    k = T.axis.R(128, ko * 8 + i2_1)
                    B[io, ii] = B[io, ii] + A[io, ii, k]
# 128x128 matmul with B accessed as B[j, k], i.e. C = A @ B.T, with the
# zero-init fused into the reduction block via T.init().
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected IR for `matmul` after decompose_reduction at the outermost
# spatial loop: the init becomes a separate full (i, j) loop nest ahead
# of the reduction nest. Structural fixture — do not edit the body.
@T.prim_func
def matmul_decompose0(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = 0.0
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected IR for `rowsum_blockized` after decompose_reduction at the
# outer row loop (despite the "matmul" name, this is the row-sum
# workload). Structural fixture — do not edit the body.
@T.prim_func
def matmul_decompose1(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [32, 4, 128], elem_offset=0, align=128, offset_factor=1)
    B = T.match_buffer(b, [32, 4], elem_offset=0, align=128, offset_factor=1)
    for i0 in T.serial(0, 32):
        with T.block("blockized_B_init"):
            io = T.axis.S(32, i0)
            for i1 in T.serial(0, 4):
                with T.block("B_init"):
                    ii = T.axis.S(4, i1)
                    B[io, ii] = T.float32(0)
    for i0, i2_o in T.grid(32, 16):
        with T.block("blockized_B_update"):
            io, ko = T.axis.remap("SR", [i0, i2_o])
            for i1, i2_i in T.grid(4, 8):
                with T.block("B"):
                    ii = T.axis.S(4, i1)
                    k = T.axis.R(128, ko * 8 + i2_i)
                    B[io, ii] = B[io, ii] + A[io, ii, k]
# Expected IR for `matmul` after decompose_reduction at the reduction
# loop k: init stays inside the (i, j) nest, immediately before the
# serial k reduction. Structural fixture — do not edit the body.
@T.prim_func
def matmul_decompose2(a: T.handle, b: T.handle, c: T.handle) -> None:
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=128, offset_factor=1)
    B = T.match_buffer(b, [128, 128], elem_offset=0, align=128, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=128, offset_factor=1)
    for i0, i1 in T.grid(128, 128):
        with T.block("update_init"):
            vi_init, vj_init = T.axis.remap("SS", [i0, i1])
            C[vi_init, vj_init] = T.float32(0)
        for i2 in T.serial(0, 128):
            with T.block("update_update"):
                vi, vj, vk = T.axis.remap("SSR", [i0, i1, i2])
                C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
# Negative fixture: loops are ordered (i, k, j) so the reduction axis is
# not innermost relative to the requested decomposition point; the
# schedule is expected to raise ScheduleError. Do not edit the body.
@T.prim_func
def matmul_decompose_fail3(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, k, j in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected IR for `matmul` after splitting i into 16x8 and k into 19x7
# (19*7 = 133 > 128, hence the T.where predicate), then decomposing at
# the inner i loop. Structural fixture — do not edit the body.
@T.prim_func
def matmul_decompose4(a: T.handle, b: T.handle, c: T.handle) -> None:
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=128, offset_factor=1)
    B = T.match_buffer(b, [128, 128], elem_offset=0, align=128, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=128, offset_factor=1)
    # body
    with T.block("root"):
        T.reads([])
        T.writes([])
        for i0_0 in T.serial(0, 16):
            for i0_1_init, i1_init in T.grid(8, 128):
                with T.block("update_init"):
                    vi_init = T.axis.S(128, i0_0 * 8 + i0_1_init)
                    vj_init = T.axis.S(128, i1_init)
                    C[vi_init, vj_init] = T.float32(0)
            for i0_1, i1, i2_0, i2_1 in T.grid(8, 128, 19, 7):
                with T.block("update_update"):
                    T.where((((i2_0 * 7) + i2_1) < 128))
                    vi = T.axis.S(128, i0_0 * 8 + i0_1)
                    vj = T.axis.S(128, i1)
                    vk = T.axis.R(128, i2_0 * 7 + i2_1)
                    C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
# Same matmul as above but with a block annotation attached; used to
# verify annotations survive decompose_reduction. Do not edit the body.
@T.prim_func
def matmul_with_annotation(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            T.block_attr({"test_annotation": 1})
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected IR for `matmul_with_annotation` after decompose_reduction at
# the outermost loop: the annotation must appear on both the init and
# the update block. Structural fixture — do not edit the body.
@T.prim_func
def matmul_decompose_with_annotation(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            T.block_attr({"test_annotation": 1})
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = 0.0
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            T.block_attr({"test_annotation": 1})
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
def test_reduction_decompose0():
    """Decomposing at the outermost spatial loop hoists init into its own nest."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update = sch.get_block("update")
    loop_i, _, _ = sch.get_loops(update)
    sch.decompose_reduction(update, loop_i)
    tvm.ir.assert_structural_equal(matmul_decompose0, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul)
def test_reduction_decompose1():
    """Decomposing a blockized reduction at the outer row loop."""
    sch = tir.Schedule(rowsum_blockized, debug_mask="all")
    blk = sch.get_block("blockized_B")
    outer, _ = sch.get_loops(blk)
    sch.decompose_reduction(blk, outer)
    tvm.ir.assert_structural_equal(matmul_decompose1, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=rowsum_blockized)
def test_reduction_decompose2():
    """Decomposing at the reduction loop keeps init inside the spatial nest."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update = sch.get_block("update")
    _, _, loop_k = sch.get_loops(update)
    sch.decompose_reduction(update, loop_k)
    tvm.ir.assert_structural_equal(matmul_decompose2, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul)
def test_reduction_decompose3():
    """Decomposition must be rejected when the loop order is invalid."""
    sch = tir.Schedule(matmul_decompose_fail3, debug_mask="all")
    update = sch.get_block("update")
    _, _, third_loop = sch.get_loops(update)
    with pytest.raises(tvm.tir.ScheduleError):
        sch.decompose_reduction(update, third_loop)
def test_reduction_decompose4():
    """Decompose after splitting i (16x8) and k (19x7, imperfect split)."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update = sch.get_block("update")
    loop_i, _, loop_k = sch.get_loops(update)
    _, inner_i = sch.split(loop_i, factors=[16, 8])
    sch.split(loop_k, factors=[19, 7])
    sch.decompose_reduction(update, inner_i)
    tvm.ir.assert_structural_equal(matmul_decompose4, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul)
def test_reduction_decompose_with_annotation():
    """Block annotations must survive decompose_reduction on both blocks."""
    sch = tir.Schedule(matmul_with_annotation, debug_mask="all")
    update = sch.get_block("update")
    loop_i, _, _ = sch.get_loops(update)
    sch.decompose_reduction(update, loop_i)
    tvm.ir.assert_structural_equal(matmul_decompose_with_annotation, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul_with_annotation)
if __name__ == "__main__":
    # Allow running this file directly: forward any CLI args to pytest.
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
|
[
"noreply@github.com"
] |
ZQPei.noreply@github.com
|
d91b0b09a6363d1a5cfb3b8b9ce79db8db0c1305
|
d56885aa49e6541932b1dba223f3a6aa1601ea5d
|
/MovieToFolder.py
|
073706a36bd81b31755c3d2c63f84d87d39fcdd9
|
[
"MIT"
] |
permissive
|
stevin05/SonarrDiskMultiplexer
|
6e11cea91e9da7b55318c11b85b93cbdf3993a34
|
21a74883aef176ab4275644c75ca5be4f1faec9a
|
refs/heads/master
| 2022-04-21T17:08:15.617696
| 2020-04-18T01:45:37
| 2020-04-18T01:45:37
| 256,647,795
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
import os
import sys
from pathlib import Path
import logging
import requests
# Convert my_movie.mkv to my_movie/my_movie.mkv so it's ready for import by radarr
# Run this from the directory with the movie files (like the staging directory)
for movie in Path('.').iterdir():
    if movie.is_dir():
        continue
    # exist_ok=True guards against a partially-completed earlier run where
    # the folder was created but the rename did not happen (the original
    # bare mkdir() raised FileExistsError in that case).
    Path(movie.stem).mkdir(exist_ok=True)
    print(f'mkdir: {movie.stem}')
    target = Path(movie.stem) / movie.name
    print(f'rename: {target}')
    movie.rename(target)
|
[
"63887600+stevin05@users.noreply.github.com"
] |
63887600+stevin05@users.noreply.github.com
|
9417beb619ae6401d4353809791731b087fa0537
|
9f71bb956fec62c32e3573221deb3ed64c6bdb21
|
/Puzzle9/contiguousSum.py
|
573cf36c9019922233f7105e78e691f8672cda34
|
[
"Apache-2.0"
] |
permissive
|
manasharma90/AoC-2020-Python
|
77654732ab3e78494377b150f1f155f0e7d7f053
|
6a979eff34136b6b74a340c40121da76e35451da
|
refs/heads/main
| 2023-07-16T20:54:59.883773
| 2021-08-19T17:27:50
| 2021-08-19T17:27:50
| 387,441,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
# Advent of Code 2020 day 9 part 2: find a contiguous run of numbers that
# sums to the part-1 answer (2089807806) and report smallest + largest of
# that run.
with open('input.txt', 'r') as f:
    a = f.read()
input_string = a.splitlines()
input_nums = []
# making a list of all the elements as integers for ease of mathematical operations
for input in input_string:
    input_nums.append(int(input))
# the number from the previous puzzle is 2089807806
# further operation will be done only on the numbers preceeding this in the list
split_index = input_nums.index(2089807806)
working_list = input_nums[:split_index]
# we start adding successive elements from the list starting from index 0 until we get 2089807806
# if sum exceeds 2089807806, we terminate the addition and start adding from the next index number
# after exiting while loop, we need to remove the previously added index value from the checked num list using pop
# NOTE(review): the inner for-loop appends every remaining number before the
# while condition is rechecked, and sum() is recomputed per append (O(n^2));
# a sliding-window approach would be cleaner — confirm intent before reuse.
for i in range(len(working_list)):
    checked_nums = [working_list[i]]
    while sum(checked_nums) <= 2089807806:
        for num in working_list[i+1:]:
            checked_nums.append(num)
            if sum(checked_nums) == 2089807806:
                checked_nums.sort()
                smallest = checked_nums[0]
                largest = checked_nums[-1]
                encryption_weakness = smallest + largest
                print(encryption_weakness)
    checked_nums.pop(0)
|
[
"malvika.sharma@live.com"
] |
malvika.sharma@live.com
|
cfbd7dc3148b8b578e64f12e68feaab0176a207b
|
09e85d81313ccb1e389e5869d663470adb2f9c07
|
/app/tests/movie_test.py
|
e6e1a28d303c7cd43d7ed3aa97af885aa315c32b
|
[] |
no_license
|
Evan-cell/kim-movie
|
c4762b4b8111594cc91825ee2b920b34c1e6d00f
|
f4e5c0753e2c75da2eeac3a999cbf5bf21b49096
|
refs/heads/main
| 2023-08-28T23:51:34.101831
| 2021-11-04T04:02:05
| 2021-11-04T04:02:05
| 424,466,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 934
|
py
|
import unittest

# User is referenced by UserModelTest below but was never imported,
# which raised NameError at test time.
from app.models import Movie, User
class MovieTest(unittest.TestCase):
    '''
    Test Class to test the behaviour of the Movie class
    '''

    def setUp(self):
        '''
        Set up method that will run before every Test
        '''
        # Movie(id, title, overview, poster_path, vote_average, vote_count)
        # — argument meaning inferred from the sample values; confirm
        # against app.models.Movie's constructor.
        self.new_movie = Movie(1234,'Python Must Be Crazy','A thrilling new Python Series','/khsjha27hbs',8.5,129993)

    def test_instance(self):
        self.assertTrue(isinstance(self.new_movie,Movie))
class UserModelTest(unittest.TestCase):
    # Exercises the password hashing contract of app.models.User:
    # the raw password must not be readable back, only verifiable.
    # NOTE(review): requires `User` to be imported from app.models.

    def setUp(self):
        self.new_user = User(password = 'banana')

    def test_password_setter(self):
        # Setting `password` is expected to populate the hashed pass_secure.
        self.assertTrue(self.new_user.pass_secure is not None)

    def test_no_access_password(self):
        # Reading the plain password attribute must raise AttributeError.
        with self.assertRaises(AttributeError):
            self.new_user.password

    def test_password_verification(self):
        self.assertTrue(self.new_user.verify_password('banana'))
|
[
"60639510+Evan-cell@users.noreply.github.com"
] |
60639510+Evan-cell@users.noreply.github.com
|
2b55b47506ede995d142ca465e342a5b6d4cd32c
|
53777e590a1478e3c430dc9bc500b4cd45583d5b
|
/src/stylons/stitems/ui.py
|
fda5c81f544529277e109ff1edeff43cc96f2a2d
|
[] |
no_license
|
mkurek/stylons
|
cc254c9338ec63b0d843ee713ad97a954e9373b6
|
abb75d91acce0f469b0eb083d6da9309dd9dada6
|
refs/heads/master
| 2020-07-23T23:49:38.296733
| 2015-01-14T20:39:49
| 2015-01-14T20:39:49
| 4,992,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
class Ui:
    """Validator for UI style identifiers of the form 'prefix' or
    'prefix-suffix' (e.g. 'back-round', 'action-small')."""

    uiPrefixes = ('normal', 'back', 'round', 'forward', 'action', 'confirm',
                  'decline')
    uiSuffixes = ('', 'round', 'small')

    @classmethod
    def isUi(cls, ui):
        # Split on the first '-'. When there is no dash, partition()
        # yields (ui, '', ''), which matches the empty-suffix case.
        prefix, _, suffix = ui.partition('-')
        return prefix in cls.uiPrefixes and suffix in cls.uiSuffixes
|
[
"marcin.kliks@gmail.com"
] |
marcin.kliks@gmail.com
|
6ad0881f0f5665fab535b939848e74b5ef8be53f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/320/usersdata/297/89238/submittedfiles/lecker.py
|
7c2c081024f3c11904eb15b9afbddb878e330677
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
# -*- coding: utf-8 -*-
import math
# Reads four house numbers and prints 'S' or 'N'.
# NOTE(review): the chained comparisons appear to test whether the
# sequence a,b,c,d has exactly one strict step in an otherwise monotone
# pattern — confirm the intended rule before changing this condition.
a=float(input('digite o numero da primeira casa: '))
b=float(input('digite o numero da segunda casa: '))
c=float(input('digite o numero da terceira casa: '))
d=float(input('digite o numero da quarta casa: '))
if a>b>=c>=d or a<b>c>=d or a<=b<c>d or a<=b<=c<d :
    print('S')
else :
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
70ae20806909613a57221cb2d48c8a30fb665da2
|
855e20bc7f7ec2a68f5b436d4d186394370ccc62
|
/setup.py
|
62cf706a784f794cd1d84f12fad57b5433e033bb
|
[
"BSD-3-Clause"
] |
permissive
|
toastedcornflakes/STReam-Annotate
|
69bbfce43237dbc6c554f58ed60188cecc0b8bf1
|
016c9f1d223ec463f638b6e791e922eb1ca2cfea
|
refs/heads/master
| 2020-12-25T14:58:07.504556
| 2016-09-02T20:41:30
| 2016-09-02T21:05:08
| 67,249,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
from setuptools import setup

# Packaging metadata for the `stra` console tool.
setup(
    name='stra',
    version='0.0.1',
    author='toastedcornflakes',
    author_email='toastedcornflakes@gmail.com',
    description='Annotates the output of running a command with the name of the corresponding stream',
    license='BSD',
    url='https://github.com/toastedcornflakes/STReam-Annotate',
    packages=['stra'],
    entry_points={
        'console_scripts': [
            'stra=stra:entrypoint',
        ],
    },
)
|
[
"toastedcornflakes@gmail.com"
] |
toastedcornflakes@gmail.com
|
a9cb003a18900abf7919ff2ad1f0cadd5d5b7980
|
bd377db57ba53462eb09cd402032f1b34991a215
|
/A7.py
|
3e35eaf5bace6a34323f7f7326e712090c45ae3c
|
[] |
no_license
|
tanvisenjaliya/PES-Python-Assignment-SET1
|
5349fecb79affc9757e7d40aa5d13b7c271492f1
|
db98bda4b49ac783651ffb88ac5c7271a3a1eb84
|
refs/heads/master
| 2022-11-24T04:14:00.165349
| 2020-08-04T13:44:38
| 2020-08-04T13:44:38
| 284,992,111
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
# Demonstrates list slicing, repetition and concatenation.
# NOTE(review): list(str1) splits the input string into individual
# characters, not comma-separated items — confirm that is intended.
str1=input("Enter the list1")
list1=list(str1)
print ("list1=",list1)
list2=list(input("Enter list2 elements"))
print ("list2=",list2)
print ("list1[1:3]=",list1[1:3])
print ("list2[3:-3]=",list2[3:-3])
print ("list1*2=",list1*2)
print ("list1+list2=",list1+list2)
|
[
"noreply@github.com"
] |
tanvisenjaliya.noreply@github.com
|
9ac5c1fa58aa1ff60d4befbbfa02050f56ea9930
|
1ee9d5a2b28f3a233f6e5c229a457c6be5c7b0e6
|
/task5.py
|
6408fa3bab1830f6f0d96ab9051dbeba81f890ba
|
[] |
no_license
|
Mekazara/Chapter3
|
43f01893b0e66e9c0f79910e2c5a02cb3809c462
|
6f694580728692e3925dad85d4e5ab559abb600a
|
refs/heads/master
| 2020-12-01T06:19:12.401726
| 2019-12-30T08:08:11
| 2019-12-30T08:08:11
| 230,573,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
def twoStrings():
    # Reads p test cases and prints "YES" if the two words of a case share
    # at least one letter, else "NO".
    # NOTE(review): the first loop overwrites s1/s2 on every iteration, so
    # the second loop checks only the LAST input pair p times — almost
    # certainly a bug when p > 1; confirm against the HackerRank original.
    p = int(input("Enter the number of test cases: "))
    for i in range(p):
        s1 = input("Print any word: ").lower()
        s2 = input("Print any word: ").lower()
    for i in range(p):
        count = 0
        for letter in s1:
            if letter in s2:
                count += 1
            else:
                continue
        if count >= 1:
            answer = "YES"
        else:
            answer = "NO"
        print(answer)

twoStrings()
# for hackerrank
# count = 0
# answer = None
# for letter in s1:
# if letter in s2:
# answer = "YES"
# count += 1
# else:
# answer = "NO"
# if count >= 1:
# return "YES"
# else:
# return "NO"
# print(answer)
|
[
"mekuchazara@gmail.com"
] |
mekuchazara@gmail.com
|
5f13bc1505fc5e928ab47e71817ac9c23d7a8bad
|
35a2589936ef752f61d0bbfb3b809d811d53002c
|
/preprocessor.py
|
a7db5416f1c5b268e6fe631d2e3124e5d305e4b1
|
[
"MIT"
] |
permissive
|
hexod0t/classifier-bert
|
af4fd55c2a70b6dcbbded660b0bc33c6d9e49540
|
f53354cefff493c36c365d6fe8cab0a2214dddd0
|
refs/heads/master
| 2023-08-05T01:45:17.868055
| 2021-09-17T22:13:40
| 2021-09-17T22:13:40
| 401,207,939
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
import torch
from transformers import BertTokenizerFast
class Preprocessor():
    """Wraps a pretrained BERT tokenizer and converts raw text into the
    tensors expected by the classifier.

    Fix: the per-method descriptions were free-floating string expressions
    in the class body (no-op statements, not attached to any function);
    they are now real docstrings on the methods they describe.
    """

    def __init__(self):
        # Loads tokenizer files from the local ./models directory.
        self.tokenizer = BertTokenizerFast.from_pretrained('./models')

    def tokenize_data(self, text):
        """Batch-encode *text* with padding.

        Params: text -> sentences that could be true or fake
        """
        sent_id = self.tokenizer.batch_encode_plus(
            text, padding=True, return_token_type_ids=False)
        return sent_id

    def create_tensors(self, tokenized_input):
        """Convert a batch-encoding mapping into (input_ids, attention_mask)
        torch tensors.
        """
        test_seq = torch.tensor(tokenized_input['input_ids'])
        test_mask = torch.tensor(tokenized_input['attention_mask'])
        return test_seq, test_mask
|
[
"cris_does@hotmail.es"
] |
cris_does@hotmail.es
|
55b14c6d19a5c6549294bd6f8f4bbfeefb2481c9
|
e38e849bcac7b7865c4b9696df171d806c0226f1
|
/results/preview/2018-06-21-00-00-01-EDT/scripts/train-sklearn-one-model.py
|
3c9ca6b3d70f66c45366ae8a64112d9c6747f644
|
[] |
no_license
|
hning86/ViennaTest
|
99a4e1160050428cf11655454abf84e702109262
|
35b504873b575bee0d4dfdb5eb576276545a1910
|
refs/heads/master
| 2020-03-18T18:32:21.714572
| 2018-09-04T07:02:38
| 2018-09-04T07:02:38
| 135,099,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import os
import argparse

# Trains a ridge regression on the diabetes dataset and logs alpha/MSE to
# the Azure ML run history; the fitted model is saved under ./outputs.
parser = argparse.ArgumentParser()
parser.add_argument('--alpha', type=float, dest='alpha', default=0.5, help='regularization strength')
args = parser.parse_args()

# Import Run from azureml.core,
# and get handle of current run for logging and history purposes
from azureml.core.run import Run
run = Run.get_submitted_run()

X, y = load_diabetes(return_X_y=True)
# Feature names of the diabetes dataset, kept for reference.
columns = ['age', 'gender', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
data = {"train": {"x": x_train, "y": y_train},
        "test": {"x": x_test, "y": y_test}}

alpha = args.alpha
print('alpha value is:', alpha)
reg = Ridge(alpha=alpha)
reg.fit(data["train"]["x"], data["train"]["y"])
print('Ridget model fitted.')

preds = reg.predict(data["test"]["x"])
mse = mean_squared_error(preds, data["test"]["y"])

# Log metrics
run.log("alpha", alpha)
run.log("mse", mse)

os.makedirs('./outputs', exist_ok=True)
model_file_name = "model.pkl"
# Save model as part of the run history.
# Fix: the original wrapped this in `with open(model_file_name, "wb")`,
# which created an unused, empty stray model.pkl in the working directory
# (the handle was never written to) while joblib wrote to outputs/.
joblib.dump(reg, os.path.join('outputs', model_file_name))
print('Mean Squared Error is:', mse)
|
[
"haining@microsoft.com"
] |
haining@microsoft.com
|
96d2e4caa83de19960ab83fd8c9fc4b96440ec96
|
6e7f7b6d070d8b2b291226657afe445fc619ab78
|
/Devquery/wsgi.py
|
079f68554d6c0ef73443eee692782aac2eca537d
|
[] |
no_license
|
sakib-shahriar/Devquery
|
51b29cd94cc9c6bc31cd81b40af7b2e87833fd24
|
5cd38a850f07808c83e889325b2360b508a86b42
|
refs/heads/master
| 2022-12-07T07:58:08.138408
| 2019-10-11T21:44:42
| 2019-10-11T21:44:42
| 185,671,424
| 0
| 0
| null | 2022-11-22T03:48:16
| 2019-05-08T19:59:23
|
Python
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for Devquery project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before building the application;
# setdefault leaves any externally-provided settings module untouched.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Devquery.settings')

application = get_wsgi_application()
|
[
"sakibnx@gmail.com"
] |
sakibnx@gmail.com
|
c27434a207e10c45443dfe8773f9ca95dd1bd6f4
|
4d2a903ae7e7cbc3ae5070c274bb07aaba4ecd7b
|
/apps/auth/views/logout.py
|
bac71f13ca1faec171bcc0559c6d6d9afe3aab1e
|
[] |
no_license
|
mbaragiola/drf-demo-app
|
14316e1e0c6b6116194bbd6ed7df0922de1f40cb
|
41e3584376c9ac0f7da14e139543eecee0422cdc
|
refs/heads/master
| 2023-01-21T12:33:24.799358
| 2020-11-11T13:36:25
| 2020-11-11T13:36:25
| 269,968,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
from django.contrib.auth import logout as django_logout
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from rest_framework_jwt.settings import api_settings as jwt_settings
__all__ = ['LogoutViewSet']
class LogoutViewSet(GenericViewSet):
    """
    Calls Django logout method and deletes the token
    assigned to the current User object.

    Accepts/Returns nothing.
    """
    permission_classes = (AllowAny,)
    serializer_class = None

    def _delete_auth_token(self, request):
        # Best-effort removal of the DRF token: AnonymousUser has no
        # auth_token attribute and the token row may already be gone.
        # (Extracted: this try/except was duplicated verbatim in
        # `alldevices` and `token`.)
        try:
            request.user.auth_token.delete()
        except (AttributeError, ObjectDoesNotExist):
            pass

    @action(detail=False, methods=['post'])
    def alldevices(self, request):
        """Drop the DRF token, end the Django session, clear the JWT cookie."""
        self._delete_auth_token(request)
        django_logout(request)
        response = Response(
            {"detail": "You've been logged out from all devices."},
            status=status.HTTP_200_OK
        )
        response.delete_cookie(jwt_settings.JWT_AUTH_COOKIE)
        return response

    @action(detail=False, methods=['post'])
    def cookie(self, request):
        """Clear only the JWT auth cookie for this device."""
        response = Response(
            {"detail": "You've been logged out from this device."},
            status=status.HTTP_200_OK
        )
        response.delete_cookie(jwt_settings.JWT_AUTH_COOKIE)
        return response

    @action(detail=False, methods=['post'])
    def token(self, request):
        """Delete only the DRF token for the current user."""
        self._delete_auth_token(request)
        return Response(
            {"detail": "You've been logged out from this device."},
            status=status.HTTP_200_OK
        )
|
[
"mbaragiola@linux.com"
] |
mbaragiola@linux.com
|
9f3a0de3814a277c06f58559e4bac131a7f8bb03
|
62e6a6c2543a2a7f93d1986073e914589d9dc116
|
/learn/13-tuples.py
|
1d28bfd147d80cb4bbc0cc29845191012ec67767
|
[] |
no_license
|
kittyofheaven/learn-python
|
3af9caecffa2d204292611368d20bf1b6ae12e00
|
d0c4cdb36de9f9d2a6fb3e66480f156ca7094b52
|
refs/heads/main
| 2023-08-16T01:14:59.221786
| 2021-10-20T03:55:10
| 2021-10-20T03:55:10
| 404,293,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
# A tuple looks like a list but uses parentheses; unlike a list its items
# cannot be replaced, removed or reassigned, which makes it safer for
# fixed records.
t = (1,2,3,4,5,6,7,8,9)

credit_card = (123123123121, "Shironeko", '10/26', "alamat rumah A3/18", 123)
credit_card1 = (123126723121, "Shiro Neko Senpai", '8/21', "alamat rumah 67", 321)
credit_cards = [credit_card, credit_card1]
print(credit_cards)

person_1 = ('neko', 15, 'male')
person_2 = ('shiro', 17, 'male')
people = [person_1, person_2]
print(people)

# For each person: a blank line, then each field on its own line.
for person in people:
    print()
    for field in person:
        print(field)
|
[
"noreply@github.com"
] |
kittyofheaven.noreply@github.com
|
9f3d9296a257a42b17d7b497de2440862edd849a
|
bfd6ac084fcc08040b94d310e6a91d5d804141de
|
/devel/custom_button.py
|
b4d32e6ccff3b71bb9e085a23c8f817535aa6a0e
|
[] |
no_license
|
jqwang17/HaeffnerLabLattice
|
3b1cba747b8b62cada4467a4ea041119a7a68bfa
|
03d5bedf64cf63efac457f90b189daada47ff535
|
refs/heads/master
| 2020-12-07T20:23:32.251900
| 2019-11-11T19:26:41
| 2019-11-11T19:26:41
| 232,792,450
| 1
| 0
| null | 2020-01-09T11:23:28
| 2020-01-09T11:23:27
| null |
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
import sys
from PyQt4 import QtGui, QtCore
class BurningWidget(QtGui.QAbstractButton):
    # A custom checkable button drawn entirely in paintEvent: a grey
    # rectangle with a smaller inner rectangle whose colour (green vs
    # darkGreen) reflects the checked state.
    # NOTE: Python 2 / PyQt4 code.

    def __init__(self):
        super(BurningWidget, self).__init__()
        self.setMinimumSize(100, 50)
        self.setCheckable(True)

    def sizeHint(self):
        # Preferred size used by layouts.
        return QtCore.QSize(100,50)

    def paintEvent(self, e):
        qp = QtGui.QPainter()
        qp.begin(self)
        self.drawWidget(qp)
        qp.end()

    def drawWidget(self, qp):
        # Draws the full-widget background, then a centered inner
        # rectangle; only the inner rectangle's colour depends on
        # isChecked().
        size = self.size()
        w = size.width()
        h = size.height()
        if self.isChecked():
            brush = QtGui.QBrush(QtCore.Qt.gray, QtCore.Qt.SolidPattern)
            qp.setPen(QtCore.Qt.NoPen)
            qp.setBrush(brush)
            qp.drawRect(0, 0, w - 1, h - 1)
            brush = QtGui.QBrush(QtCore.Qt.green, QtCore.Qt.SolidPattern)
            qp.setBrush(brush)
            qp.drawRect(w/2.0, 3*h/8.0, (w - 1)/4.0, (h - 1)/4.0)
        else:
            brush = QtGui.QBrush(QtCore.Qt.gray, QtCore.Qt.SolidPattern)
            qp.setPen(QtCore.Qt.gray)
            qp.setBrush(brush)
            qp.drawRect(0, 0, w - 1, h - 1)
            brush = QtGui.QBrush(QtCore.Qt.darkGreen, QtCore.Qt.SolidPattern)
            qp.setBrush(brush)
            qp.drawRect(w/2.0, 3*h/8.0, (w - 1)/4.0, (h - 1)/4.0)
class Example(QtGui.QWidget):
    """Demo window hosting a single BurningWidget in a horizontal layout."""
    def __init__(self):
        super(Example, self).__init__()
        self.initUI()
    def initUI(self):
        # Build the layout, wire the click handler, and show the window.
        self.wid = BurningWidget()
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(self.wid)
        self.setLayout(hbox)
        self.setWindowTitle('Burning widget')
        self.wid.clicked.connect(self.on_click)
        self.show()
    def on_click(self, x):
        # Python 2 print statement: x is the boolean Qt passes for the
        # button's checked state on each click.
        print x
if __name__ == '__main__':
    # Standard PyQt bootstrap: create the application, show the demo window,
    # and hand control to the Qt event loop until the window closes.
    app = QtGui.QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
|
[
"micramm@gmail.com"
] |
micramm@gmail.com
|
3842fd91c09f9050b796d822658bf5128f7aebf6
|
7d4bb2c79954fee935cd753b62b7dd43d98a9411
|
/src/movies/schema.py
|
2bc89ea0ecdd2bc581b5abfc32a3a41e9e5e4f06
|
[] |
no_license
|
tsuyukimakoto/ghraphql_with_django
|
fb3dd581fdc763f3470736e6b3a56975064d9586
|
2cda01b94842f0970d82efadcd0d0fb841cb995d
|
refs/heads/master
| 2020-05-01T07:53:08.936294
| 2019-12-21T01:36:46
| 2019-12-21T01:36:46
| 177,362,804
| 1
| 0
| null | 2020-01-03T14:33:53
| 2019-03-24T02:39:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,122
|
py
|
import graphene
from graphene_django.types import DjangoObjectType, ObjectType
from .models import Actor, Movie
class ActorType(DjangoObjectType):
    """GraphQL type auto-derived from the Actor Django model."""
    class Meta:
        model = Actor
class MovieType(DjangoObjectType):
    """GraphQL type auto-derived from the Movie Django model."""
    class Meta:
        model = Movie
class Query(ObjectType):
    """Root GraphQL query: single-object lookup by id plus full listings."""
    actor = graphene.Field(ActorType, id=graphene.Int())
    movie = graphene.Field(MovieType, id=graphene.Int())
    actors = graphene.List(ActorType)
    movies = graphene.List(MovieType)
    def resolve_actor(self, info, **kwargs):
        """Return the Actor with the given id, or None when no id is passed."""
        _id = kwargs.get('id')
        if _id is not None:
            return Actor.objects.get(pk=_id)
        return None
    def resolve_movie(self, info, **kwargs):
        """Return the Movie with the given id, or None when no id is passed."""
        _id = kwargs.get('id')
        if _id is not None:
            # Bug fix: the lookup previously used `pk=id` — the builtin
            # function `id`, not the extracted argument — so the query could
            # never match the requested movie.
            return Movie.objects.get(pk=_id)
        return None
    def resolve_actors(self, info, **kwargs):
        """Return all actors."""
        return Actor.objects.all()
    def resolve_movies(self, info, **kwargs):
        """Return all movies."""
        return Movie.objects.all()
class ActorInput(graphene.InputObjectType):
    """Mutation input payload identifying/naming an actor."""
    id = graphene.ID()
    name = graphene.String()
class MovieInput(graphene.InputObjectType):
    """Mutation input payload for a movie, including its actor references."""
    id = graphene.ID()
    title = graphene.String()
    actors = graphene.List(ActorInput)
    year = graphene.Int()
class CreateActor(graphene.Mutation):
    """Mutation that creates a new Actor from the supplied input."""

    ok = graphene.Boolean()
    actor = graphene.Field(ActorType)

    class Arguments:
        input = ActorInput(required=True)

    @staticmethod
    def mutate(root, info, input=None):
        # Persist the new actor, then report success alongside the instance.
        new_actor = Actor(name=input.name)
        new_actor.save()
        return CreateActor(ok=True, actor=new_actor)
class UpdateActor(graphene.Mutation):
    """Mutation that renames an existing Actor identified by primary key."""

    ok = graphene.Boolean()
    actor = graphene.Field(ActorType)

    class Arguments:
        id = graphene.Int(required=True)
        input = ActorInput(required=True)

    @staticmethod
    def mutate(root, info, id, input=None):
        target = Actor.objects.get(pk=id)
        # Guard clause: nothing to update if the lookup came back falsy.
        if not target:
            return UpdateActor(ok=False, actor=None)
        target.name = input.name
        target.save()
        return UpdateActor(ok=True, actor=target)
class CreateMovie(graphene.Mutation):
    """Mutation that creates a Movie and attaches the referenced actors."""

    ok = graphene.Boolean()
    movie = graphene.Field(MovieType)

    class Arguments:
        input = MovieInput(required=True)

    @staticmethod
    def mutate(root, info, input=None):
        # Resolve every referenced actor first; abort without creating
        # anything if any id is unknown.
        cast = []
        for entry in input.actors:
            member = Actor.objects.get(pk=entry.id)
            if member is None:
                return CreateMovie(ok=False, movie=None)
            cast.append(member)
        new_movie = Movie(
            title=input.title,
            year=input.year
        )
        new_movie.save()
        new_movie.actors.set(cast)
        return CreateMovie(ok=True, movie=new_movie)
class UpdateMovie(graphene.Mutation):
    """Mutation that updates an existing Movie's title, year and actor set."""
    ok = graphene.Boolean()
    movie = graphene.Field(MovieType)
    class Arguments:
        id = graphene.Int(required=True)
        input = MovieInput(required=True)
    @staticmethod
    def mutate(root, info, id, input=None):
        ok = False
        movie_instance = Movie.objects.get(pk=id)
        if movie_instance:
            ok = True
            actors = []
            for actor_input in input.actors:
                actor = Actor.objects.get(pk=actor_input.id)
                if actor is None:
                    # Bug fix: this error path previously returned a
                    # CreateMovie payload from an UpdateMovie mutation.
                    return UpdateMovie(ok=False, movie=None)
                actors.append(actor)
            # Bug fix: the original constructed a brand-new Movie(...) here,
            # so "update" inserted a duplicate row instead of modifying the
            # record fetched above. Mutate the fetched instance in place.
            movie_instance.title = input.title
            movie_instance.year = input.year
            movie_instance.save()
            movie_instance.actors.set(actors)
            return UpdateMovie(ok=ok, movie=movie_instance)
        return UpdateMovie(ok=ok, movie=None)
class Mutation(graphene.ObjectType):
    """Root mutation exposing create/update operations for actors and movies."""
    create_actor = CreateActor.Field()
    update_actor = UpdateActor.Field()
    create_movie = CreateMovie.Field()
    update_movie = UpdateMovie.Field()
# Module-level schema object consumed by the GraphQL view.
schema = graphene.Schema(query=Query, mutation=Mutation)
|
[
"mtsuyuki@gmail.com"
] |
mtsuyuki@gmail.com
|
81e5a6c5622b291eb98c5d67710816b22f2b498a
|
d03f5069b4fbf8da3c6d36ccb308904286856f37
|
/archive/src/step/child.py
|
22b7fcafa3a3addb1475e9793ccbbd0d52f402c7
|
[] |
no_license
|
TeraokaKanekoLab/Tree-Decomposition
|
597d3119ee7bd1cbd5abf267d929eac99269abb7
|
9bbb94722d3c4562cbf4564f0213c20cf1019599
|
refs/heads/master
| 2023-02-22T03:28:43.443392
| 2021-01-27T12:20:48
| 2021-01-27T12:20:48
| 298,772,084
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
import sys
import numpy as np
import matplotlib.pyplot as plt
def read_file():
    """Load the four heuristic result series for one graph.

    Usage: python3 <script> <filename> <width>

    Reads "output/step/<prefix>-<width>-<filename>.output" for the prefixes
    mdh, sbr, dmdh and lmdh. The MDH file carries the node count on its
    first line; every width value is normalised by that count.

    Returns (widths1, cnts1, widths2, cnts2, widths3, cnts3, widths4,
    cnts4, filename) — one (remove-rate, children-per-parent) series per
    heuristic, in MDH/SBR/DMDH/LMDH order.
    """
    if len(sys.argv) != 3:
        print("usage: python3", sys.argv[0], "<filename> <width>")
        exit()
    filename = sys.argv[1]
    arg_width = sys.argv[2]

    # MDH file: first line is the node count, the rest are data rows.
    # Files are now opened with `with` — the original leaked every handle.
    with open("output/step/mdh-" + arg_width + "-" + filename + ".output", "r") as f:
        lines = f.readlines()
    num_nodes = int(lines[0])
    widths1, cnts1 = _parse_step_lines(lines[1:], num_nodes)

    # The remaining three files contain data rows only.
    series = []
    for prefix in ("sbr", "dmdh", "lmdh"):
        path = "output/step/" + prefix + "-" + arg_width + "-" + filename + ".output"
        with open(path, "r") as f:
            series.append(_parse_step_lines(f.readlines(), num_nodes))
    (widths2, cnts2), (widths3, cnts3), (widths4, cnts4) = series

    return widths1, cnts1, widths2, cnts2, widths3, cnts3, widths4, cnts4, filename


def _parse_step_lines(lines, num_nodes):
    """Extract (width/num_nodes, children-per-parent) columns from data rows.

    Column 1 holds the width (normalised to a remove rate) and column 5 the
    average number of children per parent.
    """
    widths = []
    cnts = []
    for line in lines:
        parts = line.split()
        widths.append(int(parts[1]) / num_nodes)
        cnts.append(float(parts[5]))
    return widths, cnts
def draw_chart(x_axis1, y_axis1, x_axis2, y_axis2, x_axis3, y_axis3, x_axis4, y_axis4, filename):
    """Plot the four heuristic series and save the chart as a PDF under
    charts/step/child/."""
    # One (x, y, colour, label) entry per heuristic, plotted in order.
    series = (
        (x_axis1, y_axis1, "r", "MDH"),
        (x_axis2, y_axis2, "g", "SBR"),
        (x_axis3, y_axis3, "b", "DMDH"),
        (x_axis4, y_axis4, "c", "LMDH"),
    )
    for xs, ys, colour, label in series:
        plt.plot(xs, ys, c=colour, label=label)
    width = sys.argv[2]
    saved_name = "charts/step/child/" + width + "-" + filename + ".pdf"
    print(saved_name)
    plt.xlim(0, 1)
    plt.xlabel("remove rate")
    plt.ylabel("# of children per parent")
    plt.title("# of children per parent over remove rate of width " +
              width + ": " + filename)
    plt.legend(loc="lower right", fontsize=14)  # show the legend
    plt.savefig(saved_name)
if __name__ == '__main__':
    # Load all four series from disk, then render and save the chart.
    widths1, cnts1, widths2, cnts2, widths3, cnts3, widths4, cnts4, filename = read_file()
    draw_chart(widths1, cnts1, widths2, cnts2, widths3,
               cnts3, widths4, cnts4, filename)
|
[
"cirusthenter@gmail.com"
] |
cirusthenter@gmail.com
|
227c5bb2771d7399e52af5b01e03e2f2cdeb5f7f
|
214afa34f9f25127836cb8fabd864d6334ea53ef
|
/node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi
|
b95240b8edc0d122428279d7292857835d4b4d78
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
NetherNova/WetterBerry
|
758f1b5fa57dc6ee0828979c0a872b7da0f86117
|
530ae00d5d41d085582178c565133578bad00d5a
|
refs/heads/master
| 2021-05-04T10:17:46.179731
| 2015-11-22T17:31:17
| 2015-11-22T17:31:17
| 46,669,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,421
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"host_arch": "ia32",
"node_has_winsdk": "true",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"node_use_systemtap": "false",
"openssl_no_asm": 0,
"python": "C:\\Python27\\python.exe",
"target_arch": "ia32",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"visibility": "",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\martin\\.node-gyp\\0.10.34",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"user_agent": "npm/1.4.28 node/v0.10.34 win32 ia32",
"registry": "https://registry.npmjs.org/",
"prefix": "C:\\Users\\martin\\AppData\\Roaming\\npm",
"always_auth": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cafile": "",
"cache": "C:\\Users\\martin\\AppData\\Roaming\\npm-cache",
"cache_lock_stale": "60000",
"cache_lock_retries": "10",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cert": "",
"color": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"editor": "notepad.exe",
"email": "",
"engine_strict": "",
"force": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_mintimeout": "10000",
"fetch_retry_maxtimeout": "60000",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\martin\\AppData\\Roaming\\npm\\etc\\npmrc",
"group": "",
"heading": "npm",
"ignore_scripts": "",
"init_module": "C:\\Users\\martin\\.npm-init.js",
"init_author_name": "",
"init_author_email": "",
"init_author_url": "",
"init_license": "ISC",
"json": "",
"key": "",
"link": "",
"local_address": "",
"long": "",
"message": "%s",
"node_version": "0.10.34",
"npat": "",
"onload_script": "",
"optional": "true",
"parseable": "",
"production": "",
"proprietary_attribs": "true",
"https_proxy": "",
"rebuild_bundle": "true",
"rollback": "true",
"save": "",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"searchopts": "",
"searchexclude": "",
"searchsort": "name",
"shell": "C:\\Windows\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_tag": "",
"spin": "true",
"strict_ssl": "true",
"tag": "latest",
"tmp": "C:\\Users\\martin\\AppData\\Local\\Temp",
"unicode": "true",
"unsafe_perm": "true",
"usage": "",
"user": "",
"username": "",
"userconfig": "C:\\Users\\martin\\.npmrc",
"umask": "",
"version": "",
"versions": "",
"viewer": "browser",
"globalignorefile": "C:\\Users\\martin\\AppData\\Roaming\\npm\\etc\\npmignore"
}
}
|
[
"martin.ringsquandl@googlemail.com"
] |
martin.ringsquandl@googlemail.com
|
8c66e4849507700754a2b58f083bc6e0ab85afb6
|
f58f8120d5ceadf95042b9c2f2b24a2eaf109ddc
|
/train-classifier-from-scratch-master/MyTest3.py
|
d4d770b284743df0a0071c043485b6f8500e7b3b
|
[] |
no_license
|
heBody/mygit
|
2d52ac33af14b0faa65aceaff58fb5f2e1578f72
|
a5731a5d1c4c2699f63599d02e4b27533f19583a
|
refs/heads/master
| 2022-07-13T05:49:05.546725
| 2022-07-01T01:25:00
| 2022-07-01T01:25:00
| 117,950,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
#coding:utf-8
# Fit y = x^3 + noise with a small two-layer TF1 network, animating the fit.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random, math
x = np.linspace(-1,1,500)[:,np.newaxis]  # column vector of inputs
noise = np.random.normal(0,0.1,x.shape)
y = np.power(x,3) + noise  # noisy cubic target
xs = tf.placeholder(tf.float32,[None, 1])
ys = tf.placeholder(tf.float32,y.shape)
# Build the network:
# hidden layer with 20 units and ReLU activation
l1 = tf.layers.dense(xs,20,tf.nn.relu)
output = tf.layers.dense(l1,1)  # linear output layer, a single unit
# Mean-squared-error loss
loss = tf.losses.mean_squared_error(ys,output)
# A fairly large learning rate speeds up training on this small data set.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.4).minimize(loss)
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    plt.ion()  # interactive mode so the figure refreshes while training
    for step in range(100):
        _,c = sess.run([optimizer,loss],feed_dict={xs:x,ys:y})
        prediction = sess.run(output,feed_dict={xs:x})  # current fit
        if step % 5 == 0:
            # clf() clears the whole figure (cla() would clear only the axes)
            plt.clf()
            plt.scatter(x,y)
            plt.plot(x,prediction,'c-',lw='5')
            plt.text(0,0.5,'cost=%.4f' % c,fontdict={'size':15,'color':'red'})  # annotate the loss at (0, 0.5)
            testVal = np.array([[random.random()]])
            print(testVal, np.power(testVal,3), sess.run(output, {xs: testVal}))
            plt.pause(0.1)  # pause 0.1 s between animation frames
    plt.ioff()  # leave interactive mode
    plt.show()
|
[
"32090495+heBody@users.noreply.github.com"
] |
32090495+heBody@users.noreply.github.com
|
fc3bb5dc6ff0e342805467fe0017419ccd3b7a6a
|
9a6a38e34708930ee54ab8d68df8e545876b7123
|
/app/study/tests/test_view.py
|
2a1a16c2170db364dbeb04902346aa77d4cb5438
|
[] |
no_license
|
Avderevo/IT-school--django-react
|
b95a1350e550963fb6e034a6553c4d6f0dd33ee7
|
0878c39cf2e054543d2cf9b8aed8cb3b8e4ecbc3
|
refs/heads/master
| 2020-04-15T05:29:35.158235
| 2019-01-26T20:58:31
| 2019-01-26T20:58:31
| 164,425,247
| 3
| 0
| null | 2020-02-11T23:38:22
| 2019-01-07T11:50:14
|
Python
|
UTF-8
|
Python
| false
| false
| 7,009
|
py
|
import json
from django.urls import reverse
from rest_framework.test import APITestCase, APIClient
from users.tests.test_view import TokenTestCase
class StudyViewApiTest(TokenTestCase):
    """API tests for the study app: submitting course tests, statistics,
    teacher course registration, homework status and per-statistic chat.

    Relies on the course/lesson fixtures and the token helpers supplied by
    TokenTestCase; setUp creates one student (status 1) and one teacher
    (status 2) account used throughout.
    """
    client = APIClient()
    fixtures = ['course.json', 'lessons.json']
    def setUp(self):
        super(StudyViewApiTest, self).setUp()
        self.course_1_id = 1
        self.course_2_id = 2
        self.student = self.client.post(
            reverse(
                "users:create_user",
            ),
            data=json.dumps(
                {
                    "username": 'student',
                    "password": 'password',
                    "email": 'sdudent@gmail.com',
                    "status": 1
                }
            ),
            content_type='application/json'
        )
        self.teacher = self.client.post(
            reverse(
                "users:create_user",
            ),
            data=json.dumps(
                {
                    "username": 'teacher',
                    "password": 'password',
                    "email": 'teachert@gmail.com',
                    "status": 2
                }
            ),
            content_type='application/json'
        )
    def course_test_done(self, courseId):
        # Helper: submit a course test result as the student.
        token = self.get_token('student', 'password')
        url = reverse('study:course_test', kwargs={'courseId': courseId})
        self.client.post(url, headers={'token': token}, data={"testResult": {'testResult': '4'}},
                         format='json')
    def register_teacher_on_course(self, courseId):
        # Helper: register the teacher account on the given course.
        token = self.get_token('teacher', "password")
        url = reverse('study:register_teacher_course', kwargs={'courseId': courseId})
        self.client.post(url, headers={'token': token})
    def save_chat_message(self, statisticId, message):
        # Helper: complete course 1's test, then post one chat message.
        self.course_test_done(self.course_1_id)
        token = self.get_token('student', "password")
        url = reverse('study:save_chat_message')
        self.client.post(url, headers={'token': token}, data={'statisticId': statisticId, 'message': message})
    def test_course_test_done(self):
        token = self.get_token('student', "password")
        url = reverse('study:course_test', kwargs={'courseId': self.course_1_id})
        response = self.client.post(url, headers={'token':token},
                        data={"testResult":{'testResult':'4'}}, format='json')
        self.assertEqual(201, response.status_code)
    def test_repeated_course_test_done(self):
        # Submitting the same course test twice must be rejected with 400.
        token = self.get_token('student', "password")
        url = reverse('study:course_test', kwargs={'courseId': self.course_1_id})
        self.client.post(url, headers={'token': token}, data={"testResult": {'testResult': '4'}},
                         format='json')
        response = self.client.post(url, headers={'token': token}, data={"testResult": {'testResult': '4'}},
                                    format='json')
        self.assertEqual(400, response.status_code)
    def test_lesson_vs_statistic(self):
        # Expects lesson ids 1 and 2 from the fixtures, in order.
        self.course_test_done(self.course_1_id)
        token = self.get_token('student', "password")
        url = reverse(
            "study:user_statistics", kwargs={'courseId':self.course_1_id}
        )
        response = self.client.get(url, headers={'token':token})
        self.assertEqual(200, response.status_code)
        self.assertEqual(response.data[0]['lesson']['id'], 1)
        self.assertEqual(response.data[1]['lesson']['id'], 2)
    def test_course_statistic(self):
        token = self.get_token('student', "password")
        url = reverse(
            "study:course_statistic", kwargs={'courseId': self.course_1_id}
        )
        response = self.client.get(url, headers={'token': token})
        self.assertEqual(200, response.status_code)
    def test_user_course_list(self):
        token = self.get_token('teacher', "password")
        self.course_test_done(self.course_1_id)
        self.course_test_done(self.course_2_id)
        url = reverse(
            "study:user_courses"
        )
        response = self.client.get(url, headers={'token': token})
        self.assertEqual(200, response.status_code)
        self.assertEqual(len(response.data), 2)
    def test_get_students_statistics(self):
        # userId=2 is the student created in setUp — TODO confirm ordering.
        token = self.get_token('teacher', "password")
        self.course_test_done(self.course_1_id)
        url = reverse('study:student_statistics', kwargs={'userId':2, 'courseId':self.course_1_id})
        response = self.client.get(url, headers={'token': token})
        self.assertEqual(response.status_code, 200)
    def test_register_teacher_on_course(self):
        token = self.get_token('teacher', "password")
        url = reverse('study:register_teacher_course', kwargs={'courseId': self.course_1_id})
        response = self.client.post(url, headers={'token': token})
        self.assertEqual(response.status_code, 201)
    def test_get_teacher_courses(self):
        token = self.get_token('teacher', "password")
        self.register_teacher_on_course(self.course_1_id)
        self.register_teacher_on_course(self.course_2_id)
        url = reverse('study:teacher-courses')
        response = self.client.get(url, headers={'token': token})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 2)
    def test_get_one_course(self):
        url = reverse('study:get_one_course', kwargs={'courseId': self.course_1_id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['id'], self.course_1_id)
    def test_get_all_courses(self):
        url = reverse('study:get_all_courses')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
    def test_homework_status_change(self):
        self.course_test_done(self.course_1_id)
        token = self.get_token('teacher', "password")
        url = reverse('study:change_homework_status', kwargs={'statisticId':1})
        response = self.client.post(url, headers={'token': token}, data={'status':2} )
        self.assertEqual(response.status_code, 201)
    def test_save_chat_message(self):
        self.course_test_done(self.course_1_id)
        token = self.get_token('student', "password")
        url = reverse('study:save_chat_message')
        response = self.client.post(url, headers={'token': token}, data={'statisticId': 1, 'message':'Hello'})
        self.assertEqual(response.status_code, 201)
    def test_get_chat_message(self):
        self.course_test_done(self.course_1_id)
        token = self.get_token('student', "password")
        self.save_chat_message(1, 'Hello world!')
        url = reverse('study:get_chat_message', kwargs={'statisticId':1})
        response=self.client.get(url, headers={'token': token})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data[0]['message_body'], 'Hello world!')
|
[
"freeyura78@gmail.com"
] |
freeyura78@gmail.com
|
9ac3466096fe39e81d75e7be6759f330935d8f41
|
be5142565865890e334ce9f7c7592eedaf6ccf58
|
/main.py
|
151192d5c4b003128b63110e840c78138e093b45
|
[] |
no_license
|
Krlstch/oklejki
|
197dec195db738428cb2bfd38fd544fa36984888
|
26d5165f61d16e95bad4866ea105e108f48f395c
|
refs/heads/main
| 2023-02-03T20:53:29.759418
| 2020-12-23T17:03:29
| 2020-12-23T17:03:29
| 323,897,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,404
|
py
|
import math
import time
import pygame
# Board geometry (pixels): a 3-column x 8-row grid with a colour-label
# header row, a numbered left margin and a "clear" button below the grid.
cell_number_width = 30   # width of the row-number column on the left
cell_color_height = 30   # height of the colour-label header row
cell_width = 60
cell_height = 50
cell_border = 1
# Clear-button size and its margins relative to the grid.
cell_clear_width = 60
cell_clear_height = 30
cell_clear_upper_margin = 10
cell_clear_lower_margin = 10
cell_clear_right_margin = 10
def draw_background(game_display):
    """Draw the static board chrome on a white background: grid lines,
    row numbers 1-8, the three colour-column labels, and the "Wyczyść"
    (Polish for "clear") button. Flushes with display.update()."""
    game_display.fill((255, 255, 255))
    # horizontal
    for i in range(9):
        rect = (0, cell_color_height + i * (cell_height + cell_border),
                cell_number_width + 3 * (cell_width + cell_border), cell_border)
        pygame.draw.rect(game_display, (0, 0, 0), rect, 0)
    # vertical
    for i in range(3):
        rect = (cell_number_width + i * (cell_width + cell_border), 0,
                cell_border, cell_color_height + 8 * (cell_height + cell_border))
        pygame.draw.rect(game_display, (0, 0, 0), rect, 0)
    # numbers (row labels, centred in the left margin)
    font = pygame.font.SysFont("Arial", 24)
    for i in range(1, 9):
        size = font.size(str(i))
        num = font.render(str(i), 1, (0, 0, 0), (255, 255, 255))
        game_display.blit(num, (int((cell_number_width - size[0]) / 2),
                                int(cell_color_height + cell_border + (i - 1) * (cell_height + cell_border) + (cell_height - size[1]) / 2)))
    # colors (column headers: Polish for blue, red, yellow)
    font = pygame.font.SysFont("Arial", 12)
    for i, color in enumerate(("Niebieski", "Czerwony", "Żółty")):
        size = font.size(color)
        col = font.render(color, 1, (0, 0, 0), (255, 255, 255))
        game_display.blit(col, (int(cell_number_width + cell_border + i * (cell_width + cell_border) + (cell_width - size[0]) / 2),
                                int((cell_color_height - size[1]) / 2)))
    # clear button (outlined rectangle below the grid, right-aligned)
    rect = (cell_number_width + 3 * (cell_width + cell_border) - cell_clear_right_margin - cell_clear_width,
            cell_color_height + 8 * (cell_height + cell_border) + cell_border + cell_clear_upper_margin,
            cell_clear_width,
            cell_clear_height)
    pygame.draw.rect(game_display, (0, 0, 0), rect, cell_border)
    font = pygame.font.SysFont("Arial", 12)
    size = font.size("Wyczyść")
    clear = font.render("Wyczyść", 1, (0, 0, 0), (255, 255, 255))
    game_display.blit(clear, (int(cell_number_width + 3 * (cell_width + cell_border) - cell_clear_right_margin - cell_clear_width + (cell_clear_width - size[0]) / 2),
                              int(cell_color_height + 8 * (cell_height + cell_border) + cell_border + cell_clear_upper_margin + (cell_clear_height - size[1]) / 2)))
    pygame.display.update()
def clear_cells(game_display):
    """Repaint every cell with its column colour (blue, red, yellow),
    erasing all X/O marks, then flush the display."""
    column_colors = ((0, 0, 255), (255, 0, 0), (255, 242, 0))
    for col, color in enumerate(column_colors):
        x = cell_number_width + col * (cell_width + cell_border) + cell_border
        for row in range(8):
            y = cell_color_height + row * (cell_height + cell_border) + cell_border
            pygame.draw.rect(game_display, color, (x, y, cell_width, cell_height), 0)
    pygame.display.update()
def update_cell(tile_x, tile_y, cell, game_display):
    """Redraw one cell: repaint its column colour, then draw the mark.

    cell: 0 = empty, 1 = X (two crossed lines), 2 = O (circle).
    """
    rect = (cell_number_width + tile_x * (cell_width + cell_border) + cell_border,
            cell_color_height + tile_y * (cell_height + cell_border) + cell_border,
            cell_width,
            cell_height)
    color = [(0, 0, 255), (255, 0, 0), (255, 242, 0)][tile_x]
    pygame.draw.rect(game_display, color, rect, 0)
    if cell == 1:
        # draw X (sized to 60% of the smaller cell dimension, centred)
        size = int(0.6 * min(cell_width, cell_height))
        up = cell_color_height + tile_y * (cell_height + cell_border) + cell_border + int((cell_height - size) / 2)
        down = cell_color_height + tile_y * (cell_height + cell_border) + cell_border + int((cell_height + size) / 2)
        left = cell_number_width + tile_x * (cell_width + cell_border) + cell_border + int((cell_width - size) / 2)
        right = cell_number_width + tile_x * (cell_width + cell_border) + cell_border + int((cell_width + size) / 2)
        pygame.draw.line(game_display, (0, 0, 0), (left, up), (right, down), 4)
        pygame.draw.line(game_display, (0, 0, 0), (left, down), (right, up), 4)
    elif cell == 2:
        # draw O (circle centred in the cell)
        radius = int(0.3 * min(cell_width, cell_height))
        x = cell_number_width + tile_x * (cell_width + cell_border) + cell_border + int(cell_width / 2)
        y = cell_color_height + tile_y * (cell_height + cell_border) + cell_border + int(cell_height / 2)
        pygame.draw.circle(game_display, (0, 0, 0), (x, y), radius, 4)
    pygame.display.update()
def key_pressed(i, position, cells, game_display):
    """Handle one mouse click.

    i: mouse-button index (0 = left, draws X; 1 = right, draws O).
    position: (x, y) pixel coordinates of the click.

    A click inside the grid toggles that cell between empty and the
    button's mark; a click on the "Wyczyść" (clear) button wipes the board.
    """
    tile_x = math.floor((position[0] - cell_number_width) / (cell_width + cell_border))
    tile_y = math.floor((position[1] - cell_color_height) / (cell_height + cell_border))
    if 0 <= tile_x <= 2 and 0 <= tile_y <= 7:
        # Same button again erases the mark; otherwise set mark i+1.
        if cells[tile_x][tile_y] == i + 1:
            cells[tile_x][tile_y] = 0
        else:
            cells[tile_x][tile_y] = i + 1
        update_cell(tile_x, tile_y, cells[tile_x][tile_y], game_display)
    else:
        # Hit-test the clear button's rectangle.
        if cell_number_width + 3 * (cell_width + cell_border) - cell_clear_right_margin - cell_clear_width <= position[0] <= \
                cell_number_width + 3 * (cell_width + cell_border) - cell_clear_right_margin and \
                cell_color_height + 8 * (cell_height + cell_border) + cell_border + cell_clear_upper_margin <= position[1] <= \
                cell_color_height + 8 * (cell_height + cell_border) + cell_border + cell_clear_upper_margin + cell_clear_height:
            # Bug fix: the clearing loop reused `i`, shadowing the
            # mouse-button parameter; distinct names avoid the trap.
            for col in range(3):
                for row in range(8):
                    cells[col][row] = 0
            clear_cells(game_display)
def get_input(buttons_pressed, cells, game_display):
    """Poll the left/right mouse buttons and dispatch key_pressed on each
    rising edge, so a held button fires only once.

    Returns the updated press-state list.
    """
    held = pygame.mouse.get_pressed()[::2]  # keep left and right, drop middle
    for button, down in enumerate(held):
        if not down:
            buttons_pressed[button] = False
            continue
        if not buttons_pressed[button]:
            # Rising edge: the button went down this frame.
            key_pressed(button, pygame.mouse.get_pos(), cells, game_display)
        buttons_pressed[button] = True
    return buttons_pressed
if __name__ == "__main__":
    pygame.init()
    # Window height = board plus the clear-button row with its margins.
    game_display = pygame.display.set_mode((cell_number_width + 3 * (cell_width + cell_border),
                                            cell_color_height + 8 * (
                                                    cell_height + cell_border) + cell_border + cell_clear_upper_margin + cell_clear_height + cell_clear_lower_margin))
    draw_background(game_display)
    clear_cells(game_display)
    # cells[column][row]: 0 = empty, 1 = X (left click), 2 = O (right click).
    cells = [[0 for _ in range(8)] for _ in range(3)]
    buttons_pressed = [False, False]
    run = True
    target_fps = 60
    prev_time = time.time()
    while run:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        buttons_pressed = get_input(buttons_pressed, cells, game_display)
        # Handle time: sleep away the rest of the frame to cap at target_fps.
        curr_time = time.time()
        diff = curr_time - prev_time
        delay = max(1.0 / target_fps - diff, 0)
        time.sleep(delay)
        fps = 1.0 / (delay + diff)
        prev_time = curr_time
    pygame.quit()
    quit()
|
[
"noreply@github.com"
] |
Krlstch.noreply@github.com
|
b81490fb9503fda9cc9046ed991954a66b465018
|
e74463d223acfe6b849177177cb409060e7a44d1
|
/Data Structures and Algorithms/01 Algorithmic Toolbox/Week 6 - Dynamic Programming 2/assignment/test_knapsack.py
|
209c4bcbe684fb1999ab30911f43150511f024b3
|
[] |
no_license
|
AlexEngelhardt-old/courses
|
24f4acf6de22f6707568024c5ee4a2fde412e461
|
739be99265b0aca1c58abe6f107b4c49de055b9d
|
refs/heads/master
| 2023-05-05T22:25:50.327739
| 2020-12-09T14:57:46
| 2020-12-09T14:57:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
from knapsack import optimal_weight
# Smoke check: capacity 10 with weights {1, 4, 8}; best fill is 9 (1 + 8).
W = 10
w = [1, 4, 8]
print(optimal_weight(W, w))
|
[
"alexander.w.engelhardt@gmail.com"
] |
alexander.w.engelhardt@gmail.com
|
91079f982271171ec88a5a4d36f79f4e45a7372d
|
b9f0f1644464a89ad9d2c7cc4c888fa18ef035a9
|
/page/page_public.py
|
fb4d41d15b748712c2b8dcd2051e8076efcbbb34
|
[] |
no_license
|
lixiaofeng1993/UIAutomation
|
004cfa3a6d3149dec112a51a471ffef3ba2cf7e5
|
6381b5b7698ecbe9e92094f0e46a72f9b7e13e76
|
refs/heads/master
| 2020-04-14T20:28:13.284923
| 2019-11-22T05:11:16
| 2019-11-22T05:11:16
| 164,094,808
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,176
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/4/26 11:43
# @Author : lixiaofeng
# @Site :
# @File : page_public.py
# @Software: PyCharm
from common.basics import Crazy
class Pulicpage(Crazy):
    """Page object for WeChat official-account (公众号) flows: locating the
    search entries, following/unfollowing an account and reading titles.
    Each *_loc attribute is a (strategy, selector) locator tuple consumed by
    the Crazy base-class helpers."""
    choice_public_loc = ('id', 'com.tencent.mm:id/b5m')  # pinned official accounts; the first is picked by default
    def clicks_choice_public(self):
        self.clicks(self.choice_public_loc, 0)
    find_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/d7b" and @text="发现"]')  # "Discover" tab button
    def click_find(self):
        self.click(self.find_loc)
    search_btn_loc = ('xpath', '//*[@resource-id="android:id/title" and @text="搜一搜"]')  # "Search" menu entry
    def click_search_btn(self):
        self.click(self.search_btn_loc)
    search_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/kh" and @text="搜索"]')  # "Search" button
    def click_search(self):
        self.click(self.search_loc)
    small_zao_loc = ('xpath', '//*[contains(@text, "小小包早教")]')
    def click_small_zao(self):
        self.click(self.small_zao_loc)
    two_small_zao_loc = ('xpath', '//*[contains(@text, "提供原创的母婴类文章,母婴用品评测。")]')  # entry in the search-result list
    def click_two_small_zao(self):
        self.click(self.two_small_zao_loc)
    follow_public_loc = ('xpath', '//*[@resource-id="android:id/title" and @text="关注公众号"]')  # "Follow official account"
    def click_follow_public(self):
        self.click(self.follow_public_loc)
    in_home_zao_btn_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/amt" and @text="在家早教课"]')  # official-account menu item
    def click_in_home_zao(self):
        self.click(self.in_home_zao_btn_loc)
    qr_code_loc = ('xpath',
                   '//com.tencent.tbs.core.webkit.WebView/android.webkit.WebView/android.view.View/android.view.View/android.view.View')  # QR code element to recognize
    def element_qr_code(self):
        return self.find_element(self.qr_code_loc)
    discern_code_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/cw" and @text="识别图中二维码"]')  # "Recognize QR code in image"
    def click_discern_code(self):
        self.click(self.discern_code_loc)
    class_group_loc = ('id', 'android:id/text1')  # group name
    def text_class_group(self):
        return self.get_text(self.class_group_loc)
    small_zao_title_loc = ('id', 'com.tencent.mm:id/k3')  # official-account title text
    def text_small_zao_title(self):
        return self.get_text(self.small_zao_title_loc)
    more_btn_loc = ('id', 'com.tencent.mm:id/jy')  # three-dot "more" button
    def click_more_btn(self):
        self.click(self.more_btn_loc)
    not_paying_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/b11" and @text="不再关注"]')  # "Unfollow"
    def click_not_paying(self):
        self.click(self.not_paying_loc)
    sure_not_paying_loc = ('xpath', '//*[@resource-id="com.tencent.mm:id/az_" and @text="不再关注"]')  # confirm "Unfollow"
    def click_sure_not_paying(self):
        self.click(self.sure_not_paying_loc)
    back_btn_loc = ('id', 'com.tencent.mm:id/kb')  # the X (close) button
    def click_back_btn(self):
        self.click(self.back_btn_loc)
|
[
"liyongfeng@xxbmm.com"
] |
liyongfeng@xxbmm.com
|
f690ec10c0ef739a407247e7ba73ddcb9e2cd94e
|
8794ed160bab5675a253735f1c7567e7e2ab5cc0
|
/scraping/migrations/0004_releaseinfo.py
|
6bafbfb19a6c2768c6b4e9392ec13feff3f1f76b
|
[] |
no_license
|
ooyu-kioo/backend_django_mcu
|
924f87fb0b0e8ad0b19b1ee44636c371388ffed9
|
b37653aa01eca0237abd5ad4e7a8c14cff8fe01b
|
refs/heads/master
| 2022-10-07T04:06:32.280450
| 2019-07-16T00:14:11
| 2019-07-16T00:14:11
| 185,401,266
| 0
| 0
| null | 2022-09-30T20:10:27
| 2019-05-07T12:51:32
|
Python
|
UTF-8
|
Python
| false
| false
| 930
|
py
|
# Generated by Django 2.2 on 2019-05-24 05:19
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    """Create the ReleaseInfo model (table "releaseInfo"): a scraped music
    release record with a UUID primary key and a creation timestamp."""
    dependencies = [
        ('scraping', '0003_auto_20190504_0213'),
    ]
    operations = [
        migrations.CreateModel(
            name='ReleaseInfo',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('artist_name', models.CharField(max_length=300)),
                ('release_title', models.CharField(max_length=300, unique=True)),
                ('release_date', models.CharField(max_length=300)),
                ('buy_url', models.CharField(max_length=300)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
            ],
            options={
                'db_table': 'releaseInfo',
            },
        ),
    ]
|
[
""
] | |
e4229e2ca9264838c89a079c67ee7a069472d406
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/classes/_hydronium1.py
|
a683742b4fb99df679d99eb4d786e8944159d953
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500
| 2021-11-06T06:31:20
| 2021-11-06T06:31:20
| 62,589,715
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,357
|
py
|
from xcp2k.inputsection import InputSection
from _point12 import _point12
class _hydronium1(InputSection):
    """CP2K input section "HYDRONIUM": keyword fields, repeated POINT
    subsections, and backward-compatible alias properties matching the
    entries in self._aliases."""

    def __init__(self):
        InputSection.__init__(self)
        self.Oxygens = []
        self.Hydrogens = []
        self.Roo = None
        self.Pno = None
        self.Qno = None
        self.Roh = None
        self.Pnh = None
        self.Qnh = None
        self.Nh = None
        self.P = None
        self.Q = None
        # Bug fix: the original assigned `self.Lambda = None` twice; the
        # redundant duplicate line was removed.
        self.Lambda = None
        self.POINT_list = []
        self._name = "HYDRONIUM"
        self._keywords = {'Roh': 'ROH', 'Nh': 'NH', 'P': 'P', 'Roo': 'ROO', 'Q': 'Q', 'Pnh': 'PNH', 'Pno': 'PNO', 'Qno': 'QNO', 'Qnh': 'QNH', 'Lambda': 'LAMBDA'}
        self._repeated_keywords = {'Oxygens': 'OXYGENS', 'Hydrogens': 'HYDROGENS'}
        self._repeated_subsections = {'POINT': '_point12'}
        self._aliases = {'Expon_numerator': 'P', 'Nhtest': 'Nh', 'R_oh': 'Roh', 'Expon_denominator': 'Q', 'R_oo': 'Roo', 'Expon_numeratorb': 'Pnh', 'Expon_numeratora': 'Pno', 'Expon_denominatorb': 'Qnh', 'Expon_denominatora': 'Qno'}
        self._attributes = ['POINT_list']

    def POINT_add(self, section_parameters=None):
        """Append a new _point12 subsection (optionally setting its
        Section_parameters) and return it."""
        new_section = _point12()
        if section_parameters is not None:
            if hasattr(new_section, 'Section_parameters'):
                new_section.Section_parameters = section_parameters
        self.POINT_list.append(new_section)
        return new_section

    def _alias(primary):
        # Class-body helper (deleted below): build a read/write property
        # forwarding to the primary attribute named `primary`.
        return property(lambda self: getattr(self, primary),
                        lambda self, value: setattr(self, primary, value),
                        doc="See documentation for %s" % primary)

    # Alias properties mirroring self._aliases; these replace ~90 lines of
    # repetitive @property/@x.setter boilerplate with equivalent properties.
    R_oo = _alias('Roo')
    Expon_numeratora = _alias('Pno')
    Expon_denominatora = _alias('Qno')
    R_oh = _alias('Roh')
    Expon_numeratorb = _alias('Pnh')
    Expon_denominatorb = _alias('Qnh')
    Nhtest = _alias('Nh')
    Expon_numerator = _alias('P')
    Expon_denominator = _alias('Q')
    del _alias
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
4d5c56c2f007d63298add4efecd1536045d738d2
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_06_01_preview/operations/_vaults_operations.py
|
941ca54d0d9171c189c4bebbf50201aaf841d748
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 76,701
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Optional per-call callback applied to every response:
# (pipeline_response, deserialized_result, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-wide serializer used to render URL path segments, query parameters
# and header values; client-side validation is disabled because the service
# performs validation itself.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str, vault_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the PUT request that creates or updates a key vault.

    Header/param dicts supplied through ``kwargs`` are consumed here; any
    remaining keyword arguments are forwarded to :class:`HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    content_type: Optional[str] = kwargs.pop("content_type", headers.pop("Content-Type", None))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with serialized path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}",
    )  # pylint: disable=line-too-long
    url: str = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str", pattern=r"^[a-zA-Z0-9-]{3,24}$"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_update_request(resource_group_name: str, vault_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Assemble the PATCH request that updates an existing key vault."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    content_type: Optional[str] = kwargs.pop("content_type", headers.pop("Content-Type", None))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with serialized path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}",
    )  # pylint: disable=line-too-long
    url: str = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str", pattern=r"^[a-zA-Z0-9-]{3,24}$"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=url, params=params, headers=headers, **kwargs)
def build_delete_request(resource_group_name: str, vault_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Assemble the DELETE request that removes a key vault."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with serialized path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}",
    )  # pylint: disable=line-too-long
    url: str = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=url, params=params, headers=headers, **kwargs)
def build_get_request(resource_group_name: str, vault_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Assemble the GET request that retrieves a single key vault."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with serialized path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}",
    )  # pylint: disable=line-too-long
    url: str = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_update_access_policy_request(
    resource_group_name: str,
    vault_name: str,
    operation_kind: Union[str, _models.AccessPolicyUpdateKind],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the PUT request that adds/replaces/removes vault access policies."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    content_type: Optional[str] = kwargs.pop("content_type", headers.pop("Content-Type", None))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with serialized path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/accessPolicies/{operationKind}",
    )  # pylint: disable=line-too-long
    url: str = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str", pattern=r"^[a-zA-Z0-9-]{3,24}$"),
        operationKind=_SERIALIZER.url("operation_kind", operation_kind, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_list_by_resource_group_request(
    resource_group_name: str, subscription_id: str, *, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request listing vaults in one resource group.

    ``top``, when given, becomes the ``$top`` paging query parameter.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with serialized path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults",
    )  # pylint: disable=line-too-long
    url: str = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    if top is not None:
        params["$top"] = _SERIALIZER.query("top", top, "int")
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_list_by_subscription_request(
    subscription_id: str, *, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request listing every vault in the subscription."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with the serialized subscription id.
    template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/vaults")
    url: str = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    if top is not None:
        params["$top"] = _SERIALIZER.query("top", top, "int")
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_list_deleted_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Assemble the GET request listing soft-deleted vaults in the subscription."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with the serialized subscription id.
    template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/deletedVaults")
    url: str = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_get_deleted_request(vault_name: str, location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Assemble the GET request that retrieves one soft-deleted vault."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with serialized path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/locations/{location}/deletedVaults/{vaultName}",
    )  # pylint: disable=line-too-long
    url: str = _format_url_section(
        template,
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str"),
        location=_SERIALIZER.url("location", location, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_purge_deleted_request(vault_name: str, location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Assemble the POST request that permanently purges a soft-deleted vault."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with serialized path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/locations/{location}/deletedVaults/{vaultName}/purge",
    )  # pylint: disable=line-too-long
    url: str = _format_url_section(
        template,
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str"),
        location=_SERIALIZER.url("location", location, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
def build_list_request(subscription_id: str, *, top: Optional[int] = None, **kwargs: Any) -> HttpRequest:
    """Assemble the GET request listing vaults via the generic resources API.

    Note this endpoint is pinned to the legacy ``2015-11-01`` resources API
    version and a fixed ``$filter`` on the vault resource type.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    filter_expr: Literal["resourceType eq 'Microsoft.KeyVault/vaults'"] = kwargs.pop(
        "filter", params.pop("$filter", "resourceType eq 'Microsoft.KeyVault/vaults'")
    )
    api_version: Literal["2015-11-01"] = kwargs.pop("api_version", params.pop("api-version", "2015-11-01"))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with the serialized subscription id.
    template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resources")
    url: str = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["$filter"] = _SERIALIZER.query("filter", filter_expr, "str")
    if top is not None:
        params["$top"] = _SERIALIZER.query("top", top, "int")
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_check_name_availability_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Assemble the POST request that checks vault name availability."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-06-01-preview"))
    content_type: Optional[str] = kwargs.pop("content_type", headers.pop("Content-Type", None))
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with the serialized subscription id.
    template = kwargs.pop(
        "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/checkNameAvailability"
    )
    url: str = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
class VaultsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.keyvault.v2021_06_01_preview.KeyVaultManagementClient`'s
:attr:`vaults` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        vault_name: str,
        parameters: Union[_models.VaultCreateOrUpdateParameters, IO],
        **kwargs: Any
    ) -> _models.Vault:
        """Send the initial PUT of the create-or-update long-running operation.

        Accepts either a ``VaultCreateOrUpdateParameters`` model (serialized to
        JSON) or a raw bytes/stream payload, and returns the deserialized
        ``Vault`` from a 200/201 response; any other status raises
        ``HttpResponseError``.
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        # Caller-supplied mappings extend/override the defaults above.
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Vault] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Bytes/stream payloads pass through untouched; models are serialized.
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "VaultCreateOrUpdateParameters")

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            vault_name=vault_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize("Vault", pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize("Vault", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    _create_or_update_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}"
    }
    # Overload: typed model payload (JSON body).
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        vault_name: str,
        parameters: _models.VaultCreateOrUpdateParameters,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Vault]:
        """Create or update a key vault in the specified subscription.

        :param resource_group_name: The name of the Resource Group to which the server belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: Name of the vault. Required.
        :type vault_name: str
        :param parameters: Parameters to create or update the vault. Required.
        :type parameters: ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultCreateOrUpdateParameters
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Vault or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.keyvault.v2021_06_01_preview.models.Vault]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: raw binary/stream payload.
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        vault_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Vault]:
        """Create or update a key vault in the specified subscription.

        :param resource_group_name: The name of the Resource Group to which the server belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: Name of the vault. Required.
        :type vault_name: str
        :param parameters: Parameters to create or update the vault. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Vault or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.keyvault.v2021_06_01_preview.models.Vault]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        vault_name: str,
        parameters: Union[_models.VaultCreateOrUpdateParameters, IO],
        **kwargs: Any
    ) -> LROPoller[_models.Vault]:
        """Create or update a key vault in the specified subscription.

        :param resource_group_name: The name of the Resource Group to which the server belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: Name of the vault. Required.
        :type vault_name: str
        :param parameters: Parameters to create or update the vault. Is either a
         VaultCreateOrUpdateParameters type or a IO type. Required.
        :type parameters: ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultCreateOrUpdateParameters
         or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Vault or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.keyvault.v2021_06_01_preview.models.Vault]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Vault] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # A fresh poller issues the initial PUT; a continuation token resumes
        # a previously started operation instead.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                vault_name=vault_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into a Vault model.
            deserialized = self._deserialize("Vault", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    begin_create_or_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}"
    }
    # Overload: typed model payload (JSON body).
    @overload
    def update(
        self,
        resource_group_name: str,
        vault_name: str,
        parameters: _models.VaultPatchParameters,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Vault:
        """Update a key vault in the specified subscription.

        :param resource_group_name: The name of the Resource Group to which the server belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: Name of the vault. Required.
        :type vault_name: str
        :param parameters: Parameters to patch the vault. Required.
        :type parameters: ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultPatchParameters
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Vault or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.Vault
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: raw binary/stream payload.
    @overload
    def update(
        self,
        resource_group_name: str,
        vault_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Vault:
        """Update a key vault in the specified subscription.

        :param resource_group_name: The name of the Resource Group to which the server belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: Name of the vault. Required.
        :type vault_name: str
        :param parameters: Parameters to patch the vault. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Vault or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.Vault
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        vault_name: str,
        parameters: Union[_models.VaultPatchParameters, IO],
        **kwargs: Any
    ) -> _models.Vault:
        """Update a key vault in the specified subscription.

        :param resource_group_name: The name of the Resource Group to which the server belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: Name of the vault. Required.
        :type vault_name: str
        :param parameters: Parameters to patch the vault. Is either a VaultPatchParameters type or a IO
         type. Required.
        :type parameters: ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultPatchParameters or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Vault or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.Vault
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        # Caller-supplied mappings extend/override the defaults above.
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Vault] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Bytes/stream payloads pass through untouched; models are serialized.
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "VaultPatchParameters")

        request = build_update_request(
            resource_group_name=resource_group_name,
            vault_name=vault_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize("Vault", pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize("Vault", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}"
    }
    @distributed_trace
    def delete(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, vault_name: str, **kwargs: Any
    ) -> None:
        """Deletes the specified Azure key vault.
        :param resource_group_name: The name of the Resource Group to which the vault belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: The name of the vault to delete. Required.
        :type vault_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default ARM error mapping; caller-supplied entries take precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        request = build_delete_request(
            resource_group_name=resource_group_name,
            vault_name=vault_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        # 204 means the vault was already gone; both outcomes are success.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}"
    }
    @distributed_trace
    def get(self, resource_group_name: str, vault_name: str, **kwargs: Any) -> _models.Vault:
        """Gets the specified Azure key vault.
        :param resource_group_name: The name of the Resource Group to which the vault belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: The name of the vault. Required.
        :type vault_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Vault or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.Vault
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default ARM error mapping; caller-supplied entries take precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        cls: ClsType[_models.Vault] = kwargs.pop("cls", None)
        request = build_get_request(
            resource_group_name=resource_group_name,
            vault_name=vault_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("Vault", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}"
    }
    # Typed overload (model-body variant); the runtime implementation is the
    # @distributed_trace definition of update_access_policy below.
    @overload
    def update_access_policy(
        self,
        resource_group_name: str,
        vault_name: str,
        operation_kind: Union[str, _models.AccessPolicyUpdateKind],
        parameters: _models.VaultAccessPolicyParameters,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.VaultAccessPolicyParameters:
        """Update access policies in a key vault in the specified subscription.
        :param resource_group_name: The name of the Resource Group to which the vault belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: Name of the vault. Required.
        :type vault_name: str
        :param operation_kind: Name of the operation. Known values are: "add", "replace", and "remove".
         Required.
        :type operation_kind: str or
         ~azure.mgmt.keyvault.v2021_06_01_preview.models.AccessPolicyUpdateKind
        :param parameters: Access policy to merge into the vault. Required.
        :type parameters: ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultAccessPolicyParameters
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VaultAccessPolicyParameters or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultAccessPolicyParameters
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typed overload (raw IO-body variant); the runtime implementation is the
    # @distributed_trace definition of update_access_policy below.
    @overload
    def update_access_policy(
        self,
        resource_group_name: str,
        vault_name: str,
        operation_kind: Union[str, _models.AccessPolicyUpdateKind],
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.VaultAccessPolicyParameters:
        """Update access policies in a key vault in the specified subscription.
        :param resource_group_name: The name of the Resource Group to which the vault belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: Name of the vault. Required.
        :type vault_name: str
        :param operation_kind: Name of the operation. Known values are: "add", "replace", and "remove".
         Required.
        :type operation_kind: str or
         ~azure.mgmt.keyvault.v2021_06_01_preview.models.AccessPolicyUpdateKind
        :param parameters: Access policy to merge into the vault. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VaultAccessPolicyParameters or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultAccessPolicyParameters
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def update_access_policy(
        self,
        resource_group_name: str,
        vault_name: str,
        operation_kind: Union[str, _models.AccessPolicyUpdateKind],
        parameters: Union[_models.VaultAccessPolicyParameters, IO],
        **kwargs: Any
    ) -> _models.VaultAccessPolicyParameters:
        """Update access policies in a key vault in the specified subscription.
        :param resource_group_name: The name of the Resource Group to which the vault belongs.
         Required.
        :type resource_group_name: str
        :param vault_name: Name of the vault. Required.
        :type vault_name: str
        :param operation_kind: Name of the operation. Known values are: "add", "replace", and "remove".
         Required.
        :type operation_kind: str or
         ~azure.mgmt.keyvault.v2021_06_01_preview.models.AccessPolicyUpdateKind
        :param parameters: Access policy to merge into the vault. Is either a
         VaultAccessPolicyParameters type or a IO type. Required.
        :type parameters: ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultAccessPolicyParameters
         or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VaultAccessPolicyParameters or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultAccessPolicyParameters
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default ARM error mapping; caller-supplied entries take precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.VaultAccessPolicyParameters] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are forwarded as-is; model input is serialized to JSON.
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "VaultAccessPolicyParameters")
        request = build_update_access_policy_request(
            resource_group_name=resource_group_name,
            vault_name=vault_name,
            operation_kind=operation_kind,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.update_access_policy.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 and 201 carry a serialized VaultAccessPolicyParameters body.
        if response.status_code == 200:
            deserialized = self._deserialize("VaultAccessPolicyParameters", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("VaultAccessPolicyParameters", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore
        return deserialized  # type: ignore
    update_access_policy.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/accessPolicies/{operationKind}"
    }
    @distributed_trace
    def list_by_resource_group(
        self, resource_group_name: str, top: Optional[int] = None, **kwargs: Any
    ) -> Iterable["_models.Vault"]:
        """The List operation gets information about the vaults associated with the subscription and
        within the specified resource group.
        :param resource_group_name: The name of the Resource Group to which the vault belongs.
         Required.
        :type resource_group_name: str
        :param top: Maximum number of results to return. Default value is None.
        :type top: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Vault or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_06_01_preview.models.Vault]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        cls: ClsType[_models.VaultListResult] = kwargs.pop("cls", None)
        # Default ARM error mapping; caller-supplied entries take precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # Builds the request for the first page (templated URL) or for a
        # continuation page (service-provided next_link with re-quoted query
        # params and the client's api-version forced on).
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    top=top,
                    api_version=api_version,
                    template_url=self.list_by_resource_group.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserializes one page into (next_link, item iterator).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("VaultListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetches one page and raises on any non-200 status.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list_by_resource_group.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults"
    }
    @distributed_trace
    def list_by_subscription(self, top: Optional[int] = None, **kwargs: Any) -> Iterable["_models.Vault"]:
        """The List operation gets information about the vaults associated with the subscription.
        :param top: Maximum number of results to return. Default value is None.
        :type top: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Vault or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_06_01_preview.models.Vault]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        cls: ClsType[_models.VaultListResult] = kwargs.pop("cls", None)
        # Default ARM error mapping; caller-supplied entries take precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # First page uses the templated URL; continuation pages replay the
        # service's next_link with re-quoted query params and the client's
        # api-version forced on.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    top=top,
                    api_version=api_version,
                    template_url=self.list_by_subscription.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserializes one page into (next_link, item iterator).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("VaultListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetches one page and raises on any non-200 status.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/vaults"}
    @distributed_trace
    def list_deleted(self, **kwargs: Any) -> Iterable["_models.DeletedVault"]:
        """Gets information about the deleted vaults in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DeletedVault or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_06_01_preview.models.DeletedVault]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        cls: ClsType[_models.DeletedVaultListResult] = kwargs.pop("cls", None)
        # Default ARM error mapping; caller-supplied entries take precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # First page uses the templated URL; continuation pages replay the
        # service's next_link with re-quoted query params and the client's
        # api-version forced on.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_deleted_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_deleted.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserializes one page into (next_link, item iterator).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("DeletedVaultListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetches one page and raises on any non-200 status.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list_deleted.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/deletedVaults"}
    @distributed_trace
    def get_deleted(self, vault_name: str, location: str, **kwargs: Any) -> _models.DeletedVault:
        """Gets the deleted Azure key vault.
        :param vault_name: The name of the vault. Required.
        :type vault_name: str
        :param location: The location of the deleted vault. Required.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DeletedVault or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.DeletedVault
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default ARM error mapping; caller-supplied entries take precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        cls: ClsType[_models.DeletedVault] = kwargs.pop("cls", None)
        request = build_get_deleted_request(
            vault_name=vault_name,
            location=location,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get_deleted.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("DeletedVault", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_deleted.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/locations/{location}/deletedVaults/{vaultName}"
    }
    def _purge_deleted_initial(  # pylint: disable=inconsistent-return-statements
        self, vault_name: str, location: str, **kwargs: Any
    ) -> None:
        """Issue the initial purge request for a soft-deleted vault.

        Internal helper backing :meth:`begin_purge_deleted`; accepts 200
        (done) or 202 (accepted, poll for completion) and raises
        ``HttpResponseError`` otherwise.
        """
        # Default ARM error mapping; caller-supplied entries take precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        request = build_purge_deleted_request(
            vault_name=vault_name,
            location=location,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._purge_deleted_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _purge_deleted_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/locations/{location}/deletedVaults/{vaultName}/purge"
    }
    @distributed_trace
    def begin_purge_deleted(self, vault_name: str, location: str, **kwargs: Any) -> LROPoller[None]:
        """Permanently deletes the specified vault. aka Purges the deleted Azure key vault.
        :param vault_name: The name of the soft-deleted vault. Required.
        :type vault_name: str
        :param location: The location of the soft-deleted vault. Required.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only fire the initial request when not resuming from a saved token;
        # cls=lambda keeps the raw pipeline response for the poller.
        if cont_token is None:
            raw_result = self._purge_deleted_initial(  # type: ignore
                vault_name=vault_name,
                location=location,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        # Final LRO result is None unless the caller supplied a cls hook.
        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_purge_deleted.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/locations/{location}/deletedVaults/{vaultName}/purge"
    }
    @distributed_trace
    def list(self, top: Optional[int] = None, **kwargs: Any) -> Iterable["_models.Resource"]:
        """The List operation gets information about the vaults associated with the subscription.
        :param top: Maximum number of results to return. Default value is None.
        :type top: int
        :keyword filter: The filter to apply on the operation. Default value is "resourceType eq
         'Microsoft.KeyVault/vaults'". Note that overriding this default value may result in unsupported
         behavior.
        :paramtype filter: str
        :keyword api_version: Azure Resource Manager Api Version. Default value is "2015-11-01". Note
         that overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Resource or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_06_01_preview.models.Resource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # This operation goes through the generic ARM /resources endpoint, so
        # the filter and api-version are pinned (Literal) to the values the
        # service supports for listing Key Vault resources.
        filter: Literal["resourceType eq 'Microsoft.KeyVault/vaults'"] = kwargs.pop(
            "filter", _params.pop("$filter", "resourceType eq 'Microsoft.KeyVault/vaults'")
        )
        api_version: Literal["2015-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2015-11-01"))
        cls: ClsType[_models.ResourceListResult] = kwargs.pop("cls", None)
        # Default ARM error mapping; caller-supplied entries take precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # First page uses the templated URL; continuation pages replay the
        # service's next_link with re-quoted query params and the client's
        # api-version forced on.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    top=top,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserializes one page into (next_link, item iterator).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("ResourceListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetches one page and raises on any non-200 status.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list.metadata = {"url": "/subscriptions/{subscriptionId}/resources"}
    # Typed overload (model-body variant); the runtime implementation is the
    # @distributed_trace definition of check_name_availability below.
    @overload
    def check_name_availability(
        self,
        vault_name: _models.VaultCheckNameAvailabilityParameters,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.CheckNameAvailabilityResult:
        """Checks that the vault name is valid and is not already in use.
        :param vault_name: The name of the vault. Required.
        :type vault_name:
         ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultCheckNameAvailabilityParameters
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CheckNameAvailabilityResult or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.CheckNameAvailabilityResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typed overload (raw IO-body variant); the runtime implementation is the
    # @distributed_trace definition of check_name_availability below.
    @overload
    def check_name_availability(
        self, vault_name: IO, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.CheckNameAvailabilityResult:
        """Checks that the vault name is valid and is not already in use.
        :param vault_name: The name of the vault. Required.
        :type vault_name: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CheckNameAvailabilityResult or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.CheckNameAvailabilityResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def check_name_availability(
        self, vault_name: Union[_models.VaultCheckNameAvailabilityParameters, IO], **kwargs: Any
    ) -> _models.CheckNameAvailabilityResult:
        """Checks that the vault name is valid and is not already in use.

        :param vault_name: The name of the vault. Is either a VaultCheckNameAvailabilityParameters type
         or a IO type. Required.
        :type vault_name:
         ~azure.mgmt.keyvault.v2021_06_01_preview.models.VaultCheckNameAvailabilityParameters or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CheckNameAvailabilityResult or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.CheckNameAvailabilityResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map common ARM failure status codes to azure-core exception types;
        # callers may extend/override this via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01-preview"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.CheckNameAvailabilityResult] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Streams/bytes are sent as-is; models are serialized to JSON.
        if isinstance(vault_name, (IOBase, bytes)):
            _content = vault_name
        else:
            _json = self._serialize.body(vault_name, "VaultCheckNameAvailabilityParameters")

        request = build_check_name_availability_request(
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.check_name_availability.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # Only 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("CheckNameAvailabilityResult", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    check_name_availability.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/checkNameAvailability"
    }
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
35168cf875fbca339d9a03a620f3d361be4003cf
|
be2788aa621b5ef701163a2291d705752c739bfc
|
/rainframework.py
|
9d6de4c3ccd2a7a8e7913bd64fbf32ccb618c2c1
|
[
"MIT"
] |
permissive
|
charlotteviner/project
|
e804b952d625378a2661b9642f6aa69b4933ce2b
|
225e3183f7bee5f86b142547ebdea8e4c0dea9db
|
refs/heads/master
| 2020-04-10T07:02:35.053762
| 2018-04-26T10:51:02
| 2018-04-26T10:51:02
| 116,581,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,479
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 27 10:52:22 2017
@author: charlotteviner
"""
import random
class Rain():
    """
    Set up and provide methods for raindrops as agents.

    A raindrop knows the elevation grid (``land``), the other raindrops and
    the running list of every visited cell, and can move itself one cell
    downslope per call to :meth:`move`.  Property attributes are provided
    for the x and y coordinates.

    __init__ -- Set up agent coordinates.
    getx / setx -- Property accessors for the x-coordinate.
    gety / sety -- Property accessors for the y-coordinate.
    move -- Move the agent one cell downslope.
    """

    def __init__(self, land, raindrops, all_drops, length, x, y):
        """
        Set up agent coordinates.

        Args:
            land (list) -- 2-D elevation grid, indexed as land[x][y].
            raindrops (list) -- Agent coordinate list.
            all_drops (list) -- List of all raindrop coordinates across
                all iterations of the model.
            length (int) -- Largest valid index of the environment.
            x (int) -- Agent x-coordinate.  NOTE(review): currently ignored,
                the position is randomised below -- kept for interface
                compatibility; confirm whether callers expect it to be used.
            y (int) -- Agent y-coordinate (likewise ignored).
        """
        self.length = length
        # Random start position; randint's bounds are inclusive, so both
        # coordinates land in [0, length].  y is drawn first, then x,
        # preserving the original RNG call order for seeded runs.
        self._y = random.randint(0, length)
        self._x = random.randint(0, length)
        # Boundary condition: clamp into the environment.  (randint already
        # guarantees this, but it is kept as a defensive invariant.)
        self._y = max(0, min(self._y, length))
        self._x = max(0, min(self._x, length))
        self.land = land
        self.raindrops = raindrops
        self.all_drops = all_drops

    def getx(self):
        """Return the x-coordinate of the agent."""
        return self._x

    def setx(self, value):
        """Set the x-coordinate of the agent to *value*."""
        self._x = value

    # Property attribute for x.
    x = property(getx, setx)

    def gety(self):
        """Return the y-coordinate of the agent."""
        return self._y

    def sety(self, value):
        """Set the y-coordinate of the agent to *value*."""
        self._y = value

    # Property attribute for y.
    y = property(gety, sety)

    def _neighbourhood(self):
        """
        Return (neighbours, heights) for the up-to-8 cells surrounding the
        current position, clamped to the grid.

        Cells are produced in (dx, dy) lexicographic order, which is exactly
        the order the original nine hand-written boundary cases listed them
        in -- this matters because ties are broken by position in the list.
        """
        neighbours = []
        heights = []
        for dx in (-1, 0, 1):
            nx = self._x + dx
            if nx < 0 or nx > self.length:
                continue
            for dy in (-1, 0, 1):
                ny = self._y + dy
                if (dx == 0 and dy == 0) or ny < 0 or ny > self.length:
                    continue
                neighbours.append((nx, ny))
                heights.append(self.land[nx][ny])
        return neighbours, heights

    def move(self):
        """
        Move the raindrop one cell downslope.

        Finds the lowest-elevation neighbouring cell (ties resolved in
        favour of the LAST candidate, matching the original behaviour) and
        moves there when that cell is not higher than the current one; the
        visited cell is appended to all_drops as a (y, x) pair.  Returns
        None -- the original docstring's claim of returned lists was wrong.
        """
        neighbours, heights = self._neighbourhood()
        # Hoisted out of the scan loop (the original recomputed min(heights)
        # once per neighbour).
        lowest = min(heights)
        for index, height in enumerate(heights):
            if height == lowest:
                target = neighbours[index]
        if self.land[self._x][self._y] >= lowest:
            # Move agent to the chosen neighbour.
            self._x = target[0]
            self._y = target[1]
            # all_drops stores (row, col) i.e. (y, x).
            self.all_drops.append((target[1], target[0]))
        # Otherwise the drop sits in a local minimum and stays put.
|
[
"charlotteviner@btinternet.com"
] |
charlotteviner@btinternet.com
|
ed82372d90ffc4a56ad8646a850b8f8a2d285b6d
|
ca8827f8c002e59f7a3588746d9fa2eeb68e918d
|
/Round1/old_structure_analysis/old_structure_analysis/ext.py
|
b61874116bd673dddbf99d7e554437b8855383a3
|
[
"MIT"
] |
permissive
|
NavneelSinghal/HCLHackIITK
|
0af243121b50410ae3400aac70c3c2f4bb52ec63
|
91ceb865d1ff7c1ff109fbbbcfda8005d3b9cf93
|
refs/heads/master
| 2023-02-16T09:53:33.231186
| 2021-01-14T08:42:35
| 2021-01-14T08:42:35
| 283,153,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,422
|
py
|
import collections
import os
import sys
def get_feature_dict(filename, ignore_indent=False):
    """Parse ``Structure_Info.txt`` under *filename* into a numeric feature dict.

    The report is a loose "context header followed by 'Key: Value' lines"
    format.  Headers start with '-' or '['.  Numeric values are aggregated
    per 'context:key' into count/sum/min/max statistics, comma-separated
    values become one-hot flag features, and non key/value lines mentioning
    'dll' are counted as DLL imports.

    Args:
        filename: Directory containing ``Structure_Info.txt``.
        ignore_indent: If True, context names are fully stripped so the same
            header at different indent levels maps to one feature.

    Returns:
        ``collections.defaultdict(float)`` mapping feature name -> value.
    """
    # Close the file deterministically (the original leaked the handle).
    with open(os.path.join(filename, 'Structure_Info.txt'), 'r') as handle:
        lines = handle.readlines()
    # kv[context:key] = [count, sum, min, max]
    kv = collections.defaultdict(lambda: [0., 0., float('inf'), 0.])
    ret = collections.defaultdict(float)
    all_size = 0
    for l in lines:
        if len(l.strip()) == 0:
            continue
        if l[0] == '-' or l.lstrip()[0] == '[':
            # New context header.
            if ignore_indent:
                name = l.strip()
            else:
                name = l.rstrip()
        else:
            try:
                # Skip raw string dump sections (e.g. "[String ...]").
                if name.lstrip()[1:7].lower() == 'string':
                    continue
                # key/value pair
                k, v = l.split(': ')
                k = k.lstrip()
                v = v.strip()
                if k[:2] == '0x':
                    # memory line: "0xADDR FieldName" -- keep the field name.
                    k = k.split(' ')[-1]
                if k.lower().find('address') != -1:
                    continue
                try:
                    # numerical value
                    if v[:2] == '0x':
                        # hex number: take only the leading hex digits.
                        u = ''
                        for c in v[2:]:
                            if c not in '0123456789abcdefABCDEF':
                                break
                            u += c
                        val = float(int(u, 16))
                    else:
                        # entropy and possibly other floats
                        val = float(v.split(' ')[0])
                    arr = kv[name + ':' + k]
                    arr[0] += 1  # count
                    arr[1] += val  # sum
                    arr[2] = min(arr[2], val)
                    arr[3] = max(arr[3], val)
                    if k.lower().find('size') != -1:
                        all_size += val
                except Exception:
                    # non-numerical values
                    if k == 'Name':
                        # Section name extends the context.
                        name += '#' + v
                        continue
                    if v.find(', ') != -1:
                        # List of flags: one-hot encode each entry.
                        # BUG FIX: the original referenced an undefined
                        # variable 'curr' here; the resulting NameError was
                        # silently swallowed by the outer except, so flag
                        # features were never emitted.
                        for x in v.split(', '):
                            ret[name + ':' + k + ':' + x] = 1.
                    # else the value is a plain string (e.g. a hash): ignored
            except Exception:
                # Not a key/value line at all.
                if l.find('dll') != -1:
                    # dll import line
                    ret[l.split('.')[0] + '.dll'] += 1.
    all_ctr = 0
    for k, arr in kv.items():
        if k.lower().find('size') != -1:
            # Normalise size sums to fractions of the total size.
            arr[1] /= all_size
        if arr[0] > 1 and arr[2] != arr[3]:
            ret[k + ':mean'] = arr[1] / max(1, arr[0])
            ret[k.split(':')[0] + ':ctr'] = arr[0]
            all_ctr += arr[0]
            ret[k + ':min'] = arr[2]
            ret[k + ':max'] = arr[3]
        elif arr[1] != 0:
            ret[k + ':mean'] = arr[1] / max(1, arr[0])
    # Normalise the per-context counters to fractions of the grand total.
    for k, v in ret.items():
        if k[-4:] == ':ctr':
            ret[k] /= all_ctr
    return ret
def get_feature_dict_no_indent(filename):
    """Convenience wrapper: parse with indentation-insensitive context names."""
    return get_feature_dict(filename, ignore_indent=True)
if __name__ == '__main__':
    # CLI: an optional "-n" flag selects indentation-insensitive parsing;
    # the following argument is the report directory.
    if sys.argv[1] == '-n':
        features = get_feature_dict(sys.argv[2], True)
    else:
        features = get_feature_dict(sys.argv[1])
    for key, value in features.items():
        print(key, ':', value)
|
[
"navneel.singhal@ymail.com"
] |
navneel.singhal@ymail.com
|
b8dc35e654265a3bd10036c3bae134afdaae4e55
|
9482bdaa9829716997a7811eda781a2064952b8a
|
/predict.py
|
d246bba2c73e1fdc87329dedc582c075627b08e8
|
[
"MIT"
] |
permissive
|
ramaprv/yolo_pascal_voc
|
f2cec0d1dfd8ef1b8d455a6484d7a62af9760e3a
|
bd1baba97fe3517d7dac5ed589496c24e44fe79a
|
refs/heads/master
| 2022-06-13T14:45:55.173037
| 2020-05-04T21:14:14
| 2020-05-04T21:14:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,198
|
py
|
"""Taken from: http://slazebni.cs.illinois.edu/fall18/assignment3_part2.html """
import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.autograd import Variable
from config import VOC_CLASSES, VOC_IMG_MEAN, YOLO_IMG_DIM
def decoder(pred):
    '''
    pred (tensor) 1xSxSx(B*5+C) -- in our case with resnet: 1x14x14x(2*5+20)
    return (tensor) box[[x1,y1,x2,y2]] label[...]
    '''
    # Decode a YOLO-v1 style grid prediction into image-relative boxes.
    # Per-cell layout assumed: [cx, cy, w, h, conf] for each of B=2 boxes,
    # followed by C=20 class scores (indices 10:).
    grid_num = pred.squeeze().shape[0] # 14 for resnet50 base, 7 for vgg16
    assert (pred.squeeze().shape[0] == pred.squeeze().shape[1]) # square grid
    boxes = []
    cls_indexs = []
    probs = []
    cell_size = 1. / grid_num
    pred = pred.data
    pred = pred.squeeze(0) # SxSx(B*5+C)
    # Objectness scores of the two boxes per cell (channels 4 and 9).
    object_confidence1 = pred[:, :, 4].unsqueeze(2)
    object_confidence2 = pred[:, :, 9].unsqueeze(2)
    object_confidences = torch.cat((object_confidence1, object_confidence2), 2)
    # Select all predictions above the threshold
    min_confidence_threshold = 0.1
    mask1 = object_confidences > min_confidence_threshold
    # We always want to select at least one predictions so we also take the prediction with max confidence
    mask2 = (object_confidences == object_confidences.max())
    mask = (mask1 + mask2).gt(0)
    # We need to convert the grid-relative coordinates back to image coordinates
    for i in range(grid_num):
        for j in range(grid_num):
            for b in range(2):
                if mask[i, j, b] == 1:
                    # NOTE: box is a view into pred; the in-place update of
                    # box[:2] below also mutates pred.
                    box = pred[i, j, b * 5:b * 5 + 4]
                    contain_prob = torch.FloatTensor([pred[i, j, b * 5 + 4]])
                    xy = torch.FloatTensor([j, i]) * cell_size # upper left corner of grid cell
                    box[:2] = box[:2] * cell_size + xy # return cxcy relative to image
                    box_xy = torch.FloatTensor(box.size()) # convert[cx,cy,w,h] to [x1,xy1,x2,y2]
                    box_xy[:2] = box[:2] - 0.5 * box[2:]
                    box_xy[2:] = box[:2] + 0.5 * box[2:]
                    max_prob, cls_index = torch.max(pred[i, j, 10:], 0)
                    # Keep only detections whose conf * class-prob clears 0.1.
                    if float((contain_prob * max_prob)[0]) > 0.1:
                        boxes.append(box_xy.view(1, 4))
                        cls_indexs.append(cls_index)
                        probs.append(contain_prob * max_prob)
    if len(boxes) == 0:
        # No detection survived: return a single dummy zero box so callers
        # always receive tensors of the expected shape.
        boxes = torch.zeros((1, 4))
        probs = torch.zeros(1)
        cls_indexs = torch.zeros(1)
    else:
        boxes = torch.cat(boxes, 0)
        probs = torch.cat(probs, 0)
        cls_indexs = torch.stack(cls_indexs, dim=0)
    # Perform non-maximum suppression so that we don't predict many similar and overlapping boxes
    keep = nms(boxes, probs)
    return boxes[keep], cls_indexs[keep], probs[keep]
def nms(bboxes, scores, threshold=0.5):
    '''
    Greedy non-maximum suppression.

    bboxes(tensor) [N,4] -- boxes as [x1, y1, x2, y2]
    scores(tensor) [N,]  -- confidence per box
    threshold(float)     -- IoU above which a lower-scored box is dropped

    Returns a LongTensor of indices to keep, highest score first.
    '''
    left = bboxes[:, 0]
    top = bboxes[:, 1]
    right = bboxes[:, 2]
    bottom = bboxes[:, 3]
    areas = (right - left) * (bottom - top)

    _, order = scores.sort(0, descending=True)
    keep = []
    while order.numel() > 0:
        # Highest-scoring remaining box is always kept.
        current = order[0] if order.numel() > 1 else order.item()
        keep.append(current)
        if order.numel() == 1:
            break
        rest = order[1:]
        # Intersection of the current box with every remaining box.
        ix1 = left[rest].clamp(min=left[current])
        iy1 = top[rest].clamp(min=top[current])
        ix2 = right[rest].clamp(max=right[current])
        iy2 = bottom[rest].clamp(max=bottom[current])
        inter = (ix2 - ix1).clamp(min=0) * (iy2 - iy1).clamp(min=0)
        iou = inter / (areas[current] + areas[rest] - inter)
        # Survivors are the boxes that do not overlap too much.
        ids = (iou <= threshold).nonzero().squeeze()
        if ids.numel() == 0:
            break
        order = order[ids + 1]
    return torch.LongTensor(keep)
def predict_image(model, image_name, root_img_directory=''):
    """
    Predict output for a single image
    :param model: detector model for inference (must already be on the GPU;
        see the .cuda() call below)
    :param image_name: image file name e.g. '0000000.jpg'
    :param root_img_directory: prefix prepended to image_name (string
        concatenation, so it must end with a path separator or be empty)
    :return: List of lists containing:
        - (x1, y1)
        - (x2, y2)
        - predicted class name
        - image name
        - predicted class probability
    """
    result = []
    # NOTE(review): the two path parts are concatenated with '+', not joined;
    # os.path.join here receives a single already-combined string.
    image = cv2.imread(os.path.join(root_img_directory + image_name))
    h, w, _ = image.shape
    # Resize to the network input size and convert BGR (OpenCV) -> RGB.
    img = cv2.resize(image, (YOLO_IMG_DIM, YOLO_IMG_DIM))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Subtract the dataset mean before tensor conversion.
    mean = VOC_IMG_MEAN
    img = img - np.array(mean, dtype=np.float32)
    transform = transforms.Compose([transforms.ToTensor(), ])
    img = transform(img)
    with torch.no_grad():
        # Add the batch dimension: (C, H, W) -> (1, C, H, W).
        img = Variable(img[None, :, :, :])
        img = img.cuda()  # requires a CUDA device
        pred = model(img) # 1xSxSx(B*5+C)
        pred = pred.cpu()
    boxes, cls_indexs, probs = decoder(pred)
    for i, box in enumerate(boxes):
        # Scale normalised box coordinates back to original image pixels.
        x1 = int(box[0] * w)
        x2 = int(box[2] * w)
        y1 = int(box[1] * h)
        y2 = int(box[3] * h)
        cls_index = cls_indexs[i]
        cls_index = int(cls_index) # convert LongTensor to int
        prob = probs[i]
        prob = float(prob)
        result.append([(x1, y1), (x2, y2), VOC_CLASSES[cls_index], image_name, prob])
    return result
|
[
"pulkitkumar95@vpn-054.umiacs.umd.edu"
] |
pulkitkumar95@vpn-054.umiacs.umd.edu
|
dcdb7f236443dbfde9a936d17d86d13a409c60fd
|
2589f33994195434c2ac23cd66bb3821a11562cd
|
/myapp/migrations/0015_register.py
|
0414299658c7ef80dfb05f2f9ec5ce1a5d444a27
|
[] |
no_license
|
AakankshaHanda/cancer_probe
|
fe215c932c2fafb6f1538571cf731d7a55cf6125
|
169676a0d0f545c63a2b3edc182a27e87be697c2
|
refs/heads/master
| 2022-10-01T01:32:50.645312
| 2020-06-10T06:22:59
| 2020-06-10T06:22:59
| 268,717,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
# Generated by Django 3.0.5 on 2020-05-11 22:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see file header).  Edit
    # with care: the migration graph state must continue to match what was
    # recorded, so prefer new migrations over modifying this one.

    dependencies = [
        ('myapp', '0014_review'),
    ]

    operations = [
        migrations.CreateModel(
            name='Register',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): unique=True on fname/lname/birthday/password/
                # cpassword looks unintended -- it forbids two users sharing a
                # first name or birthday.  Verify against the model definition
                # before building on this schema.
                ('fname', models.CharField(max_length=300, unique=True)),
                ('lname', models.CharField(max_length=300, unique=True)),
                ('email', models.EmailField(max_length=300, unique=True)),
                ('birthday', models.CharField(max_length=300, unique=True)),
                ('password', models.CharField(max_length=30, unique=True)),
                ('cpassword', models.CharField(max_length=30, unique=True)),
            ],
        ),
    ]
|
[
"akkshanda25@gmail.com"
] |
akkshanda25@gmail.com
|
e26c09292ddea700910bce3519bcf68cfacefdcc
|
f939f97c4031e9f18a1aba8d7f67194ca3cdeb92
|
/script.py
|
693e139bd5c271515fbc9312b380841cd6f543dd
|
[] |
no_license
|
conranpearce/genetic-algorithm
|
99d749dd8827e115392ebf6ba53eeb1f90589947
|
c9ca6833b7e86905939ff0b97bd973df32918bad
|
refs/heads/main
| 2023-07-13T15:41:17.153044
| 2021-08-19T08:06:21
| 2021-08-19T08:06:21
| 397,567,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,991
|
py
|
import random
import math
import plotly.express as px
import pandas as pd
from numpy import random as rand
# Global Variables
N = 20  # Chromosome length
P = 50  # Population size
GENERATIONS = 300  # Increased to run more generations, decreasing the fitness
LOWER_BOUND = -32.0
UPPER_BOUND = 32.0
ITERATIONS = 10
# How many individuals are replaced with better solutions from the offspring.
SWAP_AMOUNT = int(P / 5)
# Mutation rate sits between 1/population and 1/chromosome-length.
MUTATION_RATE = round(((1/P) + (1/N)) / 2, 2)
FITNESS_FUNCTION = "minimisationFitnessFunction2"

# Shared state for the current run.
population, offspring = [], []
meanList, generationArr, bestArr, meanArr, minArr, minArrAverage, minAverageCalculated = [], [], [], [], [], [], []

# Pre-fill the per-generation minimum averages with zeros so summation works.
for i in range(0, GENERATIONS):
    minArrAverage.append(0)
class individual:
    """One GA candidate solution: a gene list plus its fitness score.

    BUG FIX: the original declared ``gene = []`` and ``fitness = 0`` as
    CLASS attributes, so every instance that mutated the gene list in place
    shared one list.  Per-instance attributes remove that aliasing; callers
    that assign ``ind.gene = ...`` / ``ind.fitness = ...`` are unaffected.
    """

    def __init__(self):
        self.gene = []    # chromosome: list of floats
        self.fitness = 0  # cached fitness value
# Outputting a graph to display the average minimum value across the iterations of the GA
def setGraphMinimumAverage():
    """Show a line plot of the minimum-fitness average per generation."""
    frame = pd.DataFrame(data={'Min Fitness Average': minAverageCalculated, 'Generations': generationArr})
    figure = px.line(frame, x='Generations', y='Min Fitness Average', title="GA")
    figure.show()
# Outputting a graph to visibly see the change in the fitness of the population after selection, crossover and mutation
def setGraph():
    """Show a line plot of the minimum and mean fitness per generation."""
    frame = pd.DataFrame(data={'Min Fitness': minArr, 'Mean Fitness': meanArr, 'Generations': generationArr})
    figure = px.line(frame, x='Generations', y=['Min Fitness', 'Mean Fitness'], title="GA")
    figure.show()
# Setting and outputting the end fitness after tournament selection, crossover and mutation
def calcFitnesses(currentGeneration):
    """Record the minimum and mean fitness of the global population.

    Appends to the module-level tracking lists and prints both statistics.

    Args:
        currentGeneration (int) -- generation index used for the x-axis.
    """
    runningTotal = 0
    lowest = 10000
    for member in population:
        # Round after every addition (kept from the original implementation).
        runningTotal = round(runningTotal + member.fitness, 1)
        # Track the best (lowest) fitness seen so far.
        if member.fitness < lowest:
            lowest = round(member.fitness, 1)
    average = round(runningTotal / P, 1)
    meanList.append(average)
    generationArr.append(currentGeneration)
    minArr.append(lowest)
    meanArr.append(average)
    print("Min fitness is: ", lowest)
    print("Mean fitness is: ", average)
# Setting individual genes to random values between the lower and upper bound
def setPopulation():
    """Fill the global population with P random individuals, then score them.

    Each individual gets N genes drawn uniformly from
    [LOWER_BOUND, UPPER_BOUND], rounded to one decimal place.
    """
    for _ in range(P):
        genes = [round(random.uniform(LOWER_BOUND, UPPER_BOUND), 1) for _ in range(N)]
        member = individual()
        member.gene = genes[:]
        population.append(member)
    # Score every member once the whole population exists.
    for member in population:
        member.fitness = fitnessFunction(member.gene)
# Depending on which fitness function the user has chosen carry out that fitness calculation
def fitnessFunction(ind):
    """Dispatch to the fitness function named by the FITNESS_FUNCTION global.

    Args:
        ind (list) -- chromosome (list of floats) to evaluate.

    Returns:
        float -- the fitness of *ind*.

    Raises:
        ValueError -- if FITNESS_FUNCTION names no known function (the
            original silently returned None here, which crashed later with a
            confusing TypeError).
    """
    if FITNESS_FUNCTION == "minimisationFitnessFunction":
        return minimisationFitnessFunction(ind)
    if FITNESS_FUNCTION == "minimisationFitnessFunction2":
        return minimisationFitnessFunction2(ind)
    if FITNESS_FUNCTION == "schwefelFitnessFunction":
        return schwefelFitnessFunction(ind)
    raise ValueError("Unknown FITNESS_FUNCTION: " + FITNESS_FUNCTION)
# Setting the fitness of an individual with a minimisation function. Lower and upper bounds are -5.12 and 5.12 respectively
def minimisationFitnessFunction(ind):
    """Rastrigin-style minimisation fitness; depends on the global N."""
    total = N * len(ind)
    for gene in ind:
        total = total + (gene * gene - 10 * math.cos(2 * math.pi * gene))
    return total
# Setting the fitness of an individual with a minimisation function. Lower and upper bounds are -32.0 and 32.0 respectively
def minimisationFitnessFunction2(ind):
    """Ackley minimisation fitness; depends on the global N."""
    square_sum = 0
    cosine_sum = 0
    for gene in ind:
        square_sum += (gene ** 2)
        cosine_sum += math.cos(2 * math.pi * gene)
    return -20 * math.exp(-0.2 * math.sqrt((1 / N) * square_sum)) - math.exp((1 / N) * cosine_sum)
# Fitness function calculation for Schwefel's Function. Lower and upper bounds are -500.0 and 500.0 respectively
def schwefelFitnessFunction(ind):
    """Schwefel minimisation fitness; depends on the global N."""
    accumulated = 0
    for gene in ind:
        accumulated += gene * math.sin(math.sqrt(math.fabs(gene)))
    return (418.9829 * N) - accumulated
# Check if the fitness of the worst index in the original population is less than the fitness of the best index in the new population and swap the gene
def replaceChromosomes(lowestIndex, largestIndex, tempOffspring):
    """Swap the population's worst member for the offspring's best chromosome.

    The replacement only happens when the candidate actually scores better
    (lower fitness).  The fitness function is pure, so it is evaluated once
    and reused.
    """
    candidate = tempOffspring[lowestIndex]
    candidate_fitness = fitnessFunction(candidate)
    if population[largestIndex].fitness > candidate_fitness:
        population[largestIndex].gene = candidate
        population[largestIndex].fitness = candidate_fitness
# Find the worst solution in the population (largest fitness value) and return the index of this solution
def largestFitnessSolution():
    """Return the index of the worst (highest-fitness) population member.

    BUG FIX: the original seeded the search with a hard-coded -1000, so when
    every fitness was below -1000 it silently returned index 0; -inf makes
    the search correct for any fitness range.
    """
    largest = float('-inf')
    largestIndex = 0
    for index in range(P):
        current = fitnessFunction(population[index].gene)
        if current > largest:
            largest = current
            largestIndex = index
    return largestIndex
# Find the best solution in the new offspring population (lowest fitness value) and return the index of this solution
def lowestFitnessSolution(tempOffspring):
    """Return the index of the best (lowest-fitness) offspring chromosome.

    BUG FIX: the original seeded the search with a hard-coded 1000, so when
    every fitness exceeded 1000 it silently returned index 0; +inf makes
    the search correct for any fitness range.
    """
    lowest = float('inf')
    lowestIndex = 0
    for index in range(P):
        current = fitnessFunction(tempOffspring[index])
        if current < lowest:
            lowest = current
            lowestIndex = index
    return lowestIndex
# Using tournament selection to set the offspring population
def tournamentSelection(population):
    """Fill the global offspring list with P tournament winners.

    Each round draws two random members (with replacement) and keeps the
    fitter one (lower fitness).  The two randint draws per round happen in
    the same order as the original, so seeded runs are reproducible.
    """
    for _ in range(P):
        first = population[random.randint(0, P - 1)]
        second = population[random.randint(0, P - 1)]
        winner = first if first.fitness < second.fitness else second
        offspring.append(winner)
# Using roulette-Wheel selection to set the offspring population
def rouletteWheelSelection(population):
    """Fill the global offspring list via roulette-wheel selection.

    NOTE(review): this is written for NEGATIVE fitness totals (e.g. the
    Ackley function used by default, whose values are negative):
    random.randint(a, b) requires a <= b, so randint(floor(total), 0) only
    works when the summed fitness is <= 0, and the running-total walk below
    relies on subtracting negative fitnesses to climb toward 0.  Confirm
    before reusing with a fitness function that can go positive.
    """
    # Calculating the total fitness of the population
    totalFitnessPopulation = 0
    for i in population:
        totalFitnessPopulation += i.fitness
    for i in range(0, P):
        # Select a random point from 0 to the total fitness value of the original population
        selectionPoint = random.randint(math.floor(totalFitnessPopulation), 0)
        runningTotal = math.floor(totalFitnessPopulation)
        j = 0
        # While the running total is not less than the selection point append the fitness of value of an individual in the population to the running total
        while (runningTotal >= selectionPoint) and (j < P):
            runningTotal -= population[j].fitness
            j = j + 1
        # When the running total is less than the selection point, append the last individual from the population which fitness what added to the running total
        offspring.append(population[j - 1])
# Using rank selection to set the offspring population
def rankSelection(population):
    """Fill the global offspring list via rank-based selection.

    Members are sorted ascending by fitness, ranked P (best) down to 1
    (worst), and then P members are drawn with probability proportional to
    rank.  One randint draw per selection, as in the original.
    """
    # Stable ascending sort of the first P members by fitness (replaces the
    # original bubble sort; both are stable, so the ordering is identical).
    population[:P] = sorted(population[:P], key=lambda member: member.fitness)
    # Rank the members from P (fittest) down to 1 and total the ranks.
    rankSum = 0
    for index in range(P):
        population[index].rank = P - index
        rankSum += population[index].rank
    for _ in range(P):
        # Spin the wheel: walk members until the accumulated rank passes
        # the randomly chosen selection point.
        selectionPoint = random.randint(0, rankSum)
        runningTotal = 0
        cursor = 0
        while runningTotal <= selectionPoint and cursor < P:
            runningTotal += population[cursor].rank
            cursor += 1
        offspring.append(population[cursor - 1])
# Single point crossover
def singlePointCrossover(tempOffspring):
    """Single-point crossover over consecutive offspring pairs.

    One random cut point per pair (from index 1 to N-1); the two children
    are appended to tempOffspring.
    """
    for pair_start in range(0, P, 2):
        # Cut somewhere after the first gene and before the last.
        cut = random.randint(1, N - 1)
        mother = offspring[pair_start].gene
        father = offspring[pair_start + 1].gene
        tempOffspring.append(mother[:cut] + father[cut:])
        tempOffspring.append(father[:cut] + mother[cut:])
# Multi Point Crossover
def multiPointCrossover(tempOffspring):
    """Two-point crossover over consecutive offspring pairs.

    The chromosome is split into (roughly) three thirds and the middle
    third is exchanged between the pair; both children are appended to
    tempOffspring.

    BUG FIX: when N % 3 == 0 the original used true division (N / 3),
    producing FLOAT slice indices and a TypeError at the slicing below;
    integer division keeps the same cut points as ints.
    """
    if N % 3 == 0:
        crossoverPoint1 = N // 3
        crossoverPoint2 = (N // 3) * 2
    else:
        # Chromosome does not split evenly: round to the nearest third.
        crossoverPoint1 = round(N / 3)
        crossoverPoint2 = round(N / 3) * 2
    for i in range(0, P, 2):
        first = offspring[i].gene
        second = offspring[i + 1].gene
        # Swap the middle segment between the two parents.
        tempOffspring.append(first[:crossoverPoint1] + second[crossoverPoint1:crossoverPoint2] + first[crossoverPoint2:])
        tempOffspring.append(second[:crossoverPoint1] + first[crossoverPoint1:crossoverPoint2] + second[crossoverPoint2:])
# Uniform Crossover
def uniformCrossover(tempOffspring):
    """Uniform crossover over consecutive offspring pairs.

    Each gene position is swapped between the pair on a fair coin flip
    (one randint(0, 1) draw per position, same order as the original);
    the two children are appended to tempOffspring.
    """
    for pair_start in range(0, P, 2):
        child_a = []
        child_b = []
        for position in range(N):
            # Coin flip: 0 means this position is crossed over.
            if random.randint(0, 1) == 0:
                child_a.append(offspring[pair_start + 1].gene[position])
                child_b.append(offspring[pair_start].gene[position])
            else:
                child_a.append(offspring[pair_start].gene[position])
                child_b.append(offspring[pair_start + 1].gene[position])
        tempOffspring.append(child_a)
        tempOffspring.append(child_b)
# Random mutation within a range of bounds
def randomMutation(tempOffspring):
    """Mutate each gene of each chromosome with probability MUTATION_RATE.

    A mutated gene is shifted by a random amount in [0.0, step] where the
    step itself is drawn from [0.0, UPPER_BOUND]; the direction is a coin
    flip and the result is clamped to [LOWER_BOUND, UPPER_BOUND].  Mutation
    happens in place on tempOffspring (a list of gene lists).
    """
    for i in range(0, P):
        for j in range(0, N):
            mutationProbability = random.randint(0,100) # Randomly generate a number between 0 and 100
            # If the number generated is less than the mutation rate * 100 then flip the gene in the chromosome
            # NOTE(review): randint(0, 100) has 101 outcomes, so the actual
            # per-gene probability is (100 * MUTATION_RATE) / 101.
            if mutationProbability < (100 * MUTATION_RATE):
                # Carry out mutation of randomly adding or minusing a number in range from 0.0 to the mutation step
                addOrMinus = random.randint(0,1) # Set variable to randomly select minus or plus
                # Create a random integer between 0 and the upper bound for mutation step, then alter the genes value by a random integer between 0.0 and the mutation step
                mutationStep = round(random.uniform(0.0, UPPER_BOUND),1)
                alter = round(random.uniform(0.0, mutationStep),1)
                # If variable equals 0 then minus a random integer in range 0.0 to the mutation step
                if (addOrMinus == 0):
                    if ((tempOffspring[i][j] - alter) >= LOWER_BOUND):
                        tempOffspring[i][j] = round((tempOffspring[i][j] - alter), 1)
                    # If the value goes below the lower bound after the minus then set to the lower bound as the minimum value it can be
                    else:
                        tempOffspring[i][j] = LOWER_BOUND
                # If variable does not equal 0 then plus a random integer in range 0.0 to the mutation step
                else:
                    if ((tempOffspring[i][j] + alter) <= UPPER_BOUND):
                        tempOffspring[i][j] = round((tempOffspring[i][j] + alter), 1)
                    # If the value goes above the upper bound after the addition then set to the upper bound as the maximum value it can be
                    else:
                        tempOffspring[i][j] = UPPER_BOUND
# Gaussian mutation, mutation within a range of a normal distribution
def gaussianMutation(tempOffspring):
    """Mutate genes with probability MUTATION_RATE by adding Gaussian noise.

    Noise is drawn from N(0, 5) via numpy's RNG and rounded to one decimal
    place; results outside [LOWER_BOUND, UPPER_BOUND] are clamped to the
    nearest bound.  Mutation happens in place on tempOffspring.
    """
    # Carry out mutation on every individual in population
    for i in range(0, P):
        for j in range(0, N):
            mutationProbability = random.randint(0,100)
            if mutationProbability < (100 * MUTATION_RATE):
                # Loc indicates the center of the distribution and Scale indicates the spread of the distribution
                alter = round(float(rand.normal(loc=0, scale=5, size=(1))),1)
                # In-bounds result: apply the shift; otherwise clamp.
                if ((tempOffspring[i][j] + alter) >= LOWER_BOUND) and ((tempOffspring[i][j] + alter) <= UPPER_BOUND):
                    tempOffspring[i][j] = tempOffspring[i][j] + alter
                elif ((tempOffspring[i][j] + alter) < LOWER_BOUND):
                    tempOffspring[i][j] = LOWER_BOUND
                elif ((tempOffspring[i][j] + alter) > UPPER_BOUND):
                    tempOffspring[i][j] = UPPER_BOUND
# Creating a random end point for scrambled mutation
def calculateEndPoint(startPoint):
    """Return a random scramble end index drawn from [startPoint, N).

    Re-draws until the result is strictly less than N so the whole
    chromosome is never scrambled. Requires startPoint < N, which the
    caller guarantees (it draws startPoint from [0, N-1]).

    Bug fix: the original recursed when endPoint == N but discarded the
    recursive call's return value, so it could still return N.
    """
    endPoint = random.randint(startPoint, N)
    while endPoint == N:
        endPoint = random.randint(startPoint, N)
    return endPoint
# Scrambled Mutation
def scrambleMutation(tempOffspring):
    """Scramble mutation: shuffle a random slice of genes within an individual.

    For each of the P offspring, with probability MUTATION_RATE, a random
    sub-range [startingPoint, endPoint) of the chromosome is shuffled in place.
    """
    for i in range(0, P):
        mutationProbability = random.randint(0, 100)
        if mutationProbability < (100 * MUTATION_RATE):
            # Choose the slice of the chromosome to scramble; the end point
            # never covers the whole gene (see calculateEndPoint).
            startingPoint = random.randint(0, N-1)
            endPoint = calculateEndPoint(startingPoint)
            # Copy the slice out and shuffle it.
            shuffledArray = [tempOffspring[i][j] for j in range(startingPoint, endPoint)]
            rand.shuffle(shuffledArray)
            # Bug fix: the original nested write-back loop assigned the *last*
            # shuffled element to every position in the slice; write each
            # shuffled value back to its own position instead.
            tempOffspring[i][startingPoint:endPoint] = shuffledArray
# Reset the tracking lists so that the GA is able to iterate again
def clearArrays():
    """Empty every module-level bookkeeping list so another GA run starts fresh."""
    for tracked in (meanList, generationArr, bestArr, meanArr,
                    minArr, population, offspring):
        tracked.clear()
# Main function to start code from
def main():
    """Run the GA ITERATIONS times and plot the averaged minimum fitness.

    Each iteration seeds a fresh population, evolves it for GENERATIONS
    generations (selection -> crossover -> mutation -> elitist swap), and
    accumulates the per-generation minimum fitness into minArrAverage.
    """
    # Carry out iterations of the GA so that we can plot the average of the results
    for x in range(0,ITERATIONS):
        setPopulation() # Setting population of individuals
        # Carry out crossover and mutation for as many generations set, this is the termination condition of the algorithm
        for i in range(1, GENERATIONS + 1):
            print("\nGeneration ", i)
            tempOffspring = []
            # Selection
            tournamentSelection(population)
            # Crossover
            singlePointCrossover(tempOffspring)
            # Mutation
            randomMutation(tempOffspring)
            # Elitist replacement: SWAP_AMOUNT best offspring replace the worst originals
            for j in range(0, SWAP_AMOUNT):
                largestIndex = largestFitnessSolution() # Finding worst solution in the population (individual with the largest fitness value)
                lowestIndex = lowestFitnessSolution(tempOffspring) # Finding best solution in the temporary offspring (lowest fitness value)
                replaceChromosomes(lowestIndex, largestIndex, tempOffspring) # Set the worst case of the original population to equal the best case of the temp offspring population
            # Calculate and print the fitness of the population after selection, mutation and crossover
            calcFitnesses(i)
        # setGraph() # Setting individual iteration graph using plotly
        # Accumulate this run's per-generation minimum fitness for averaging later
        for i in range(0, GENERATIONS):
            minArrAverage[i] += minArr[i]
        clearArrays() # Re set the values of the arrays so that other iterations of the GA can occur
    # Appending the average minimum fitness results from the GA run
    for i in range(0, len(minArrAverage)):
        minAverageCalculated.append(round(float(minArrAverage[i] / ITERATIONS),1))
        generationArr.append(i+1) # Set the generation array equal to how many generations occur per run
    setGraphMinimumAverage() # Output the average results on the graph
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
conranpearce.noreply@github.com
|
0c4359df5c7eee324f79aee6a6a234dfd7dcc4b6
|
be0edc20433a6ad3bf4b8f448f1c457437de4c52
|
/huxley/accounts/exceptions.py
|
e17051478da72471a69878919d3035f3e31e57cc
|
[
"BSD-3-Clause"
] |
permissive
|
ethanlee16/huxley
|
eca8c3c1d4ea543a5875c28d4cb5c81dc4e4eddb
|
5d601e952c711e9b6703170c78fb23fcc2734ead
|
refs/heads/master
| 2021-01-15T09:20:25.310737
| 2014-12-03T14:51:33
| 2014-12-03T14:51:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
# Copyright (c) 2011-2014 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
class AuthenticationError(LookupError):
    '''Raised when a User cannot be authenticated.

    Instances carry one of the message constants below; use the classmethod
    factories rather than constructing messages by hand.
    '''
    INVALID_CREDENTIALS = 'The credentials you provided are invalid.'
    MISSING_FIELDS = 'One or more of the fields is blank.'
    INACTIVE_ACCOUNT = 'Your account is inactive.'
    def __init__(self, message):
        # Delegate so the message is exposed via str(exc) and exc.args.
        super(AuthenticationError, self).__init__(message)
    @classmethod
    def invalid_credentials(cls):
        '''Factory: the username/password combination was wrong.'''
        return cls(cls.INVALID_CREDENTIALS)
    @classmethod
    def missing_fields(cls):
        '''Factory: a required login field was left empty.'''
        return cls(cls.MISSING_FIELDS)
    @classmethod
    def inactive_account(cls):
        '''Factory: the account exists but is not active.'''
        return cls(cls.INACTIVE_ACCOUNT)
class PasswordChangeFailed(Exception):
    '''Raised when a user's password change is rejected.

    Instances carry one of the message constants below; use the classmethod
    factories rather than constructing messages by hand.
    '''
    MISSING_FIELDS = 'One or more fields is blank.'
    PASSWORD_TOO_SHORT = 'New password must be at least 6 characters long.'
    INVALID_CHARACTERS = 'New password can only consist of alphanumeric characters and symbols (above numbers).'
    INCORRECT_PASSWORD = 'Incorrect password.'
    def __init__(self, message):
        # Delegate so the message is exposed via str(exc) and exc.args.
        super(PasswordChangeFailed, self).__init__(message)
    @classmethod
    def missing_fields(cls):
        '''Factory: a required form field was left empty.'''
        return cls(cls.MISSING_FIELDS)
    @classmethod
    def password_too_short(cls):
        '''Factory: the new password is shorter than 6 characters.'''
        return cls(cls.PASSWORD_TOO_SHORT)
    @classmethod
    def invalid_characters(cls):
        '''Factory: the new password contains disallowed characters.'''
        return cls(cls.INVALID_CHARACTERS)
    @classmethod
    def incorrect_password(cls):
        '''Factory: the current password given did not match.'''
        return cls(cls.INCORRECT_PASSWORD)
|
[
"k.mehta@berkeley.edu"
] |
k.mehta@berkeley.edu
|
a62b036f57c1874d0ce3d82a3468729f8b5bf2bb
|
65c001b5f572a6b0ca09dd9821016d628b745009
|
/frappe-bench/env/lib/python2.7/site-packages/github/Team.py
|
8c28a758a5909d410d42641226a8f708cd339f5e
|
[
"LGPL-3.0-or-later",
"MIT"
] |
permissive
|
ibrahmm22/library-management
|
666dffebdef1333db122c2a4a99286e7c174c518
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
refs/heads/master
| 2022-10-30T17:53:01.238240
| 2020-06-11T18:36:41
| 2020-06-11T18:36:41
| 271,620,992
| 0
| 1
|
MIT
| 2022-10-23T05:04:57
| 2020-06-11T18:36:21
|
CSS
|
UTF-8
|
Python
| false
| false
| 17,254
|
py
|
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Jan Orel <jan.orel@gooddata.com> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Aron Culotta <aronwc@gmail.com> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2016 mattjmorrison <mattjmorrison@mattjmorrison.com> #
# Copyright 2018 Isuru Fernando <isuruf@gmail.com> #
# Copyright 2018 Jacopo Notarstefano <jacopo.notarstefano@gmail.com> #
# Copyright 2018 James D'Amato <james.j.damato@gmail.com> #
# Copyright 2018 Maarten Fonville <mfonville@users.noreply.github.com> #
# Copyright 2018 Manu Hortet <manuhortet@gmail.com> #
# Copyright 2018 Michał Górny <mgorny@gentoo.org> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Tim Boring <tboring@hearst.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import absolute_import
import six
import github.GithubObject
import github.NamedUser
import github.Organization
import github.PaginatedList
import github.Repository
import github.TeamDiscussion
from . import Consts
class Team(github.GithubObject.CompletableGithubObject):
    """
    This class represents Teams. The reference can be found here http://developer.github.com/v3/orgs/teams/

    Attributes are lazily completed: accessing one that is still NotSet
    triggers a fetch of the full object (via _completeIfNotSet).
    """
    def __repr__(self):
        return self.get__repr__({"id": self._id.value, "name": self._name.value})
    @property
    def id(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value
    @property
    def members_count(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._members_count)
        return self._members_count.value
    @property
    def members_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._members_url)
        return self._members_url.value
    @property
    def name(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._name)
        return self._name.value
    @property
    def description(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._description)
        return self._description.value
    @property
    def permission(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._permission)
        return self._permission.value
    @property
    def repos_count(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._repos_count)
        return self._repos_count.value
    @property
    def repositories_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._repositories_url)
        return self._repositories_url.value
    @property
    def slug(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._slug)
        return self._slug.value
    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value
    @property
    def organization(self):
        """
        :type: :class:`github.Organization.Organization`
        """
        self._completeIfNotSet(self._organization)
        return self._organization.value
    @property
    def privacy(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._privacy)
        return self._privacy.value
    def add_to_members(self, member):
        """
        This API call is deprecated. Use `add_membership` instead.
        https://developer.github.com/v3/teams/members/#deprecation-notice-1
        :calls: `PUT /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_
        :param member: :class:`github.NamedUser.NamedUser`
        :rtype: None
        """
        assert isinstance(member, github.NamedUser.NamedUser), member
        headers, data = self._requester.requestJsonAndCheck(
            "PUT", self.url + "/members/" + member._identity
        )
    def add_membership(self, member, role=github.GithubObject.NotSet):
        """
        :calls: `PUT /teams/:id/memberships/:user <http://developer.github.com/v3/orgs/teams>`_
        :param member: :class:`github.NamedUser.NamedUser`
        :param role: string ("member" or "maintainer"; defaults to "member")
        :rtype: None
        """
        assert isinstance(member, github.NamedUser.NamedUser), member
        assert role is github.GithubObject.NotSet or isinstance(
            role, (str, six.text_type)
        ), role
        if role is not github.GithubObject.NotSet:
            assert role in ["member", "maintainer"]
            put_parameters = {
                "role": role,
            }
        else:
            put_parameters = {
                "role": "member",
            }
        headers, data = self._requester.requestJsonAndCheck(
            "PUT", self.url + "/memberships/" + member._identity, input=put_parameters
        )
    def add_to_repos(self, repo):
        """
        :calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
        :param repo: :class:`github.Repository.Repository`
        :rtype: None
        """
        assert isinstance(repo, github.Repository.Repository), repo
        headers, data = self._requester.requestJsonAndCheck(
            "PUT", self.url + "/repos/" + repo._identity
        )
    def set_repo_permission(self, repo, permission):
        """
        :calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
        :param repo: :class:`github.Repository.Repository`
        :param permission: string
        :rtype: None
        """
        assert isinstance(repo, github.Repository.Repository), repo
        put_parameters = {
            "permission": permission,
        }
        headers, data = self._requester.requestJsonAndCheck(
            "PUT", self.url + "/repos/" + repo._identity, input=put_parameters
        )
    def delete(self):
        """
        :calls: `DELETE /teams/:id <http://developer.github.com/v3/orgs/teams>`_
        :rtype: None
        """
        headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
    def edit(
        self,
        name,
        description=github.GithubObject.NotSet,
        permission=github.GithubObject.NotSet,
        privacy=github.GithubObject.NotSet,
    ):
        """
        :calls: `PATCH /teams/:id <http://developer.github.com/v3/orgs/teams>`_
        :param name: string
        :param description: string
        :param permission: string
        :param privacy: string
        :rtype: None
        """
        assert isinstance(name, (str, six.text_type)), name
        assert description is github.GithubObject.NotSet or isinstance(
            description, (str, six.text_type)
        ), description
        assert permission is github.GithubObject.NotSet or isinstance(
            permission, (str, six.text_type)
        ), permission
        assert privacy is github.GithubObject.NotSet or isinstance(
            privacy, (str, six.text_type)
        ), privacy
        post_parameters = {
            "name": name,
        }
        if description is not github.GithubObject.NotSet:
            post_parameters["description"] = description
        if permission is not github.GithubObject.NotSet:
            post_parameters["permission"] = permission
        if privacy is not github.GithubObject.NotSet:
            post_parameters["privacy"] = privacy
        headers, data = self._requester.requestJsonAndCheck(
            "PATCH", self.url, input=post_parameters
        )
        # Refresh this object's attributes from the server's response.
        self._useAttributes(data)
    def get_discussions(self):
        """
        :calls: `GET /teams/:id/discussions <https://developer.github.com/v3/teams/discussions/#list-discussions>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.TeamDiscussion.TeamDiscussion`
        """
        return github.PaginatedList.PaginatedList(
            github.TeamDiscussion.TeamDiscussion,
            self._requester,
            self.url + "/discussions",
            None,
            headers={"Accept": Consts.mediaTypeTeamDiscussionsPreview},
        )
    def get_members(self, role=github.GithubObject.NotSet):
        """
        :calls: `GET /teams/:id/members <https://developer.github.com/v3/teams/members/#list-team-members>`_
        :param role: string ("member", "maintainer" or "all")
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
        """
        assert role is github.GithubObject.NotSet or isinstance(
            role, (str, six.text_type)
        ), role
        url_parameters = dict()
        if role is not github.GithubObject.NotSet:
            assert role in ["member", "maintainer", "all"]
            url_parameters["role"] = role
        return github.PaginatedList.PaginatedList(
            github.NamedUser.NamedUser,
            self._requester,
            self.url + "/members",
            url_parameters,
        )
    def get_repos(self):
        """
        :calls: `GET /teams/:id/repos <http://developer.github.com/v3/orgs/teams>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
        """
        return github.PaginatedList.PaginatedList(
            github.Repository.Repository, self._requester, self.url + "/repos", None
        )
    def invitations(self):
        """
        :calls: `GET /teams/:id/invitations <https://developer.github.com/v3/teams/members>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
        """
        return github.PaginatedList.PaginatedList(
            github.NamedUser.NamedUser,
            self._requester,
            self.url + "/invitations",
            None,
            headers={"Accept": Consts.mediaTypeOrganizationInvitationPreview},
        )
    def has_in_members(self, member):
        """
        :calls: `GET /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_
        :param member: :class:`github.NamedUser.NamedUser`
        :rtype: bool
        """
        assert isinstance(member, github.NamedUser.NamedUser), member
        # The API answers 204 when the user is a member (no JSON body).
        status, headers, data = self._requester.requestJson(
            "GET", self.url + "/members/" + member._identity
        )
        return status == 204
    def has_in_repos(self, repo):
        """
        :calls: `GET /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
        :param repo: :class:`github.Repository.Repository`
        :rtype: bool
        """
        assert isinstance(repo, github.Repository.Repository), repo
        # The API answers 204 when the team manages the repository.
        status, headers, data = self._requester.requestJson(
            "GET", self.url + "/repos/" + repo._identity
        )
        return status == 204
    def remove_membership(self, member):
        """
        :calls: `DELETE /teams/:team_id/memberships/:username <https://developer.github.com/v3/teams/members/#remove-team-membership>`
        :param member: :class:`github.NamedUser.NamedUser`
        :rtype: None
        """
        assert isinstance(member, github.NamedUser.NamedUser), member
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.url + "/memberships/" + member._identity
        )
    def remove_from_members(self, member):
        """
        This API call is deprecated. Use `remove_membership` instead:
        https://developer.github.com/v3/teams/members/#deprecation-notice-2
        :calls: `DELETE /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_
        :param member: :class:`github.NamedUser.NamedUser`
        :rtype: None
        """
        assert isinstance(member, github.NamedUser.NamedUser), member
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.url + "/members/" + member._identity
        )
    def remove_from_repos(self, repo):
        """
        :calls: `DELETE /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
        :param repo: :class:`github.Repository.Repository`
        :rtype: None
        """
        assert isinstance(repo, github.Repository.Repository), repo
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.url + "/repos/" + repo._identity
        )
    @property
    def _identity(self):
        # Fragment used when this team is embedded in a URL path: teams are
        # addressed by their numeric id.
        return self.id
    def _initAttributes(self):
        # Reset every lazily-completed attribute to the NotSet sentinel.
        self._id = github.GithubObject.NotSet
        self._members_count = github.GithubObject.NotSet
        self._members_url = github.GithubObject.NotSet
        self._name = github.GithubObject.NotSet
        self._description = github.GithubObject.NotSet
        self._permission = github.GithubObject.NotSet
        self._repos_count = github.GithubObject.NotSet
        self._repositories_url = github.GithubObject.NotSet
        self._slug = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet
        self._organization = github.GithubObject.NotSet
        self._privacy = github.GithubObject.NotSet
    def _useAttributes(self, attributes):
        # Populate attributes from a raw API response dict; keys absent from
        # the response leave the corresponding attribute untouched.
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "members_count" in attributes:  # pragma no branch
            self._members_count = self._makeIntAttribute(attributes["members_count"])
        if "members_url" in attributes:  # pragma no branch
            self._members_url = self._makeStringAttribute(attributes["members_url"])
        if "name" in attributes:  # pragma no branch
            self._name = self._makeStringAttribute(attributes["name"])
        if "description" in attributes:  # pragma no branch
            self._description = self._makeStringAttribute(attributes["description"])
        if "permission" in attributes:  # pragma no branch
            self._permission = self._makeStringAttribute(attributes["permission"])
        if "repos_count" in attributes:  # pragma no branch
            self._repos_count = self._makeIntAttribute(attributes["repos_count"])
        if "repositories_url" in attributes:  # pragma no branch
            self._repositories_url = self._makeStringAttribute(
                attributes["repositories_url"]
            )
        if "slug" in attributes:  # pragma no branch
            self._slug = self._makeStringAttribute(attributes["slug"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
        if "organization" in attributes:  # pragma no branch
            self._organization = self._makeClassAttribute(
                github.Organization.Organization, attributes["organization"]
            )
        if "privacy" in attributes:  # pragma no branch
            self._privacy = self._makeStringAttribute(attributes["privacy"])
|
[
"iabouelftouh@trudoc24x7.com"
] |
iabouelftouh@trudoc24x7.com
|
370de429b5de32664a5d850ea94da51b7d4a1b58
|
207544107302b1a837d50f9140f45482685a166c
|
/temp.py
|
457c36e7bc5b495a84ea1b62db183f739e312ee2
|
[] |
no_license
|
surajdurgesht/Machine-Learning-Lab
|
16ec874f1f82afa1d9a6644b84798ada2fa03d2c
|
09f1e9506d82291255cb5106e2a764fe96189fa8
|
refs/heads/master
| 2020-06-04T19:09:30.297071
| 2019-06-16T06:34:20
| 2019-06-16T06:34:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,894
|
py
|
# -*- coding: utf-8 -*-
"""
@author: Suraj
"""
import numpy as np
import matplotlib.pyplot as plt
from math import pi
def get_experiment_data(num_datasets, target_fn):
    """Draw two datasets of (x, target_fn(x)) points with x ~ Uniform(-1, 1).

    Returns two arrays of shape (num_datasets, 2); each row is one sampled
    point used to fit a line for the bias/variance experiment.
    """
    samples = np.random.uniform(-1, 1, num_datasets * 2)
    samples = samples.reshape(num_datasets, 2)
    xa = samples[:, 0]
    xb = samples[:, 1]
    p1 = np.column_stack((xa, target_fn(xa)))
    p2 = np.column_stack((xb, target_fn(xb)))
    return p1, p2
def print_data(label, value, width=10):
    """Print `label` padded to `width` characters, then `: value`."""
    print('{0:{1}}: {2}'.format(label, width, value))
def get_slope(p1, p2):
    """Slope of the line through each paired row of points p1[i] -> p2[i]."""
    rise = p2[:, 1] - p1[:, 1]
    run = p2[:, 0] - p1[:, 0]
    return rise / run
def get_y_intercept(p, m):
    """Intercept b of y = m*x + b through each point row of p.

    From the point-slope form y - y1 = m(x - x1), b = -m*x1 + y1.
    """
    return -m * p[:, 0] + p[:, 1]
def calculate_bias(x, m, b, target_fn):
    """Bias: mean squared gap between the averaged hypothesis and the target,
    rounded to 2 decimal places. The target is evaluated at pi * x."""
    average_g = get_g_avg_vect(x, m, b)
    target_vals = target_fn(pi * x)
    return round(mean_sum_squared_error(average_g, target_vals), 2)
def calculate_var(x, m, b):
    """Variance: mean squared spread of the individual hypotheses around
    their average, rounded to 2 decimal places."""
    all_g = hypothesis_fn(m, x, b)
    average_g = get_g_avg_vect(x, m, b)
    return round(mean_sum_squared_error(all_g, average_g), 2)
def get_g_avg_vect(x, m, b):
    """Averaged hypothesis value evaluated at every point of x.

    Returns an array shaped (and typed) like x.
    """
    g_avg = np.full_like(x, 1)
    for idx, point in enumerate(x):
        g_avg[idx] = calculate_g_avg(m, point, b)
    return g_avg
def calculate_g_avg(m, x, b):
    """Average of every hypothesis line evaluated at the single point x."""
    # Broadcast the scalar x across all slopes/intercepts, then average.
    x_repeated = np.full_like(m, x)
    return np.average(hypothesis_fn(m, x_repeated, b))
def mean_sum_squared_error(x1, x2):
    """Mean of the element-wise squared differences between x1 and x2."""
    diff = x1 - x2
    return np.average(diff * diff)
def get_x(num_pts):
    """num_pts evenly spaced sample points spanning [-1, 1]."""
    return np.linspace(-1, 1, num_pts)
def hypothesis_fn(m, x, b):
    """Linear hypothesis y = m*x + b (works element-wise on arrays)."""
    return b + m * x
def plot_exp(m, b, target_fn):
    """Plot every fitted hypothesis line (grey), the target f(pi*x) and the
    averaged hypothesis (red) over x in [-1, 1]."""
    plt.style.use('seaborn-whitegrid')
    fig, ax = plt.subplots()
    x = np.linspace(-1, 1, 100)
    #ax.plot(x, target_fn(pi *x) )
    # plot each hypothesis fn (faint, so the ensemble reads as a band)
    for this_m, this_b in zip(m, b):
        ax.plot(x, hypothesis_fn(this_m, x, this_b), color='gray', alpha=0.2)
    ax.plot(x, target_fn(pi * x), label='f(x)')
    ax.plot(x, get_g_avg_vect(x, m, b), color='r', label='avg g(x)')
    ax.legend(facecolor='w', fancybox=True, frameon=True, edgecolor='black', borderpad=1)
    plt.show()
def main():
    """Run the bias/variance experiment: fit a line through each random pair
    of points, then report bias, variance and e_out (their sum) and plot."""
    num_datasets = 100
    #time = np.arange(-1, 1,0.001)
    #x = np.linspace(0, 1, 100)
    target_fn = np.square
    #plt.plot(time, target_fn)
    p1, p2 = get_experiment_data(num_datasets, target_fn)
    # One line per dataset, defined by the two sampled points.
    m = get_slope(p1, p2)
    b = get_y_intercept(p1, m)
    bias = calculate_bias(get_x(num_datasets), m, b, target_fn)
    var = calculate_var(get_x(num_datasets), m, b)
    print_data('bias', bias)
    print_data('var', var)
    # Expected out-of-sample error decomposes as bias + variance.
    print_data('e_out', round(bias + var, 2))
    plot_exp(m, b, target_fn)
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
surajdurgesht.noreply@github.com
|
0d1a4210be5fd242dce89a24edccb2f7ba0eacc8
|
ec3cd6ea0ca48ddf830d116c8f1326c772e15ded
|
/Security Tools/Scanners/Port_Scanner.py
|
aac8501dffc24071e12dfe37060a24ff68ed48da
|
[
"MIT"
] |
permissive
|
SWCousins/TootSuite
|
10d9fb9234750d8881ab938e71bbe90d29f5a71e
|
9e581d6828f07044cbbe3f71c86e896ff4c65835
|
refs/heads/master
| 2023-04-09T03:38:34.816813
| 2023-03-26T01:13:00
| 2023-03-26T01:13:00
| 143,488,188
| 0
| 0
|
MIT
| 2020-01-07T04:36:05
| 2018-08-04T01:32:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
import sys
import socket
ip = '192.168.1.11'        # target host to scan
open_ports = []            # ports that accepted a TCP connection
ports = range(1, 65535)    # full scan range: ports 1..65534
def probe_port(ip, port, result = 1):
    """Attempt a TCP connect to (ip, port); return 0 on success, else 1."""
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        probe.settimeout(0.5)  # keep the scan moving on filtered ports
        code = probe.connect_ex((ip, port))
        if code == 0:
            result = code
        probe.close()
    except Exception:
        # Best-effort probe: any socket error just reports the port closed.
        pass
    return result
# Probe every port and collect the ones that answered.
for port in ports:
    # Bug fix: the original called sys.stout.flush(), which raises
    # AttributeError on the very first iteration — the attribute is
    # sys.stdout.
    sys.stdout.flush()
    response = probe_port(ip, port)
    if response == 0:
        open_ports.append(port)
# Report the results of the scan.
if open_ports:
    print("Open Ports are: ")
    print(sorted(open_ports))
else:
    print("No open ports found.")
|
[
"spencercousins@outlook.com"
] |
spencercousins@outlook.com
|
a9e3052430c8ee57e1fd25ab7bd40fd094129190
|
a3eb732ead7e1d10a85a88e42dc639eb16a40265
|
/tests/instagram/fixtures/endpoints/login.py
|
5582777dbd0c27cf09995003e2fda2f8c899945a
|
[
"MIT"
] |
permissive
|
carsam2021/instagram_api
|
7654c0f485c22935cf478016e46e65acbeda9344
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
refs/heads/master
| 2023-03-16T14:06:27.515432
| 2020-10-17T04:39:19
| 2020-10-17T04:39:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
import json
import httpretty
import pytest
@httpretty.activate
@pytest.fixture(scope='session')
def accounts_login_success(request, base_uri):
    """Fixture: stub POST <base_uri>accounts/login/ with an empty-JSON 200."""
    login_url = ''.join([base_uri, 'accounts/login/'])
    httpretty.register_uri(httpretty.POST, login_url, body=json.dumps({}))
|
[
"root@proscript.ru"
] |
root@proscript.ru
|
af3c186e1285b14a2977ff6f96973739bc572921
|
aa404e67da6affb43fed60d6c267505833c59a28
|
/training/eval.py
|
48f9fdf56772d327282a10a1c46fedd4a728dbf1
|
[] |
no_license
|
MikszaM/rl-quadruped
|
b3a1a6bb6c96570c85a2805b7285c26d6c7303b4
|
6bcaa76a7ba06c4007144cf4418ec273b362da62
|
refs/heads/master
| 2022-12-18T18:13:06.603163
| 2020-09-23T18:06:06
| 2020-09-23T18:06:06
| 269,415,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines import PPO2
import gym
import mm_walker
from stable_baselines.common.policies import FeedForwardPolicy, register_policy
from stable_baselines.common.vec_env import SubprocVecEnv, DummyVecEnv, VecNormalize
import pybullet_envs
class CustomPolicy(FeedForwardPolicy):
    """Feed-forward PPO policy with three 128-unit hidden layers for both
    the policy (pi) and value (vf) networks."""
    def __init__(self, *args, **kwargs):
        architecture = [dict(pi=[128, 128, 128], vf=[128, 128, 128])]
        super(CustomPolicy, self).__init__(*args, **kwargs,
                                           net_arch=architecture,
                                           feature_extraction="mlp")
# Paths for logging and the previously trained model/normalisation stats.
tensorboard_log = "rl-quadruped/training/tensorboard"
model_filepath = "rl-quadruped/training/trainedPPO.zip"
veclog = "rl-quadruped/training/trainedPPO.pkl"
env_id = "mm-walker-v0"
env = gym.make(env_id,render=True)
env.seed(0)
env = DummyVecEnv([lambda: env])
#env = VecNormalize(env, norm_obs=True, norm_reward=False, training=False)
# Restore the observation-normalisation statistics saved during training and
# freeze them for evaluation (no stat updates, raw rewards).
env = VecNormalize.load(veclog, env)
env.training = False
env.norm_reward = False
model = PPO2.load(model_filepath, env=env,
                  tensorboard_log=tensorboard_log, verbose=1, policy=CustomPolicy)
# Evaluate the agent deterministically for a single episode.
mean_reward, std_reward = evaluate_policy(model, env, deterministic=True, n_eval_episodes=1)
print(mean_reward, std_reward)
|
[
"miksza.michal@gmail.com"
] |
miksza.michal@gmail.com
|
d7d86fe5dc30eb6d99c91a096dd6e44de927eef5
|
a62805c61b259964fb8c5a4add1935d2a9460773
|
/latexbot/latexbot/simplelatex.py
|
e453db266da03315ba032b9c1b94849ea644dff9
|
[
"MIT"
] |
permissive
|
tylertian123/ryver-latexbot
|
6cc10da5d65fc111e3818102ab5ca80c47440292
|
ff9844a5461429788e2e86d5a679922cda69c21d
|
refs/heads/master
| 2022-11-02T23:51:13.161110
| 2022-10-07T02:16:15
| 2022-10-07T02:34:00
| 230,349,327
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,834
|
py
|
r"""
Convert simple, easy-to-read math expressions into LaTeX.
E.g. `sin(sqrt(e^x + a) / 2)` becomes `\sin \left(\frac{\sqrt{e^{x}+a}}{2}\right)`.
"""
import lark
import typing
GRAMMAR = r"""
%import common.WS
%import common.NUMBER
%ignore WS
VARIABLE: /\.\.\.|inf|oo|[a-zA-Z]+|(\\\w+)/
VAL_SUFFIX: /!|'+/
ARW_OP: /<->|<=>|<-->|<==>|<--|-->|<==|==>|<-|->|<<=|=>/
CMP_OP: /<=|>=|!=|[<>=]/
SUM_OP: /[+-]/
MUL_OP: /[\*\/%]/
POW_OP: /\^|\*\*/
FUNC_NAME.2: /".+?"/
| "sin" | "cos" | "tan" | "sinh" | "cosh" | "tanh"
| "arctan2" | "arcsin" | "arccos" | "arctan" | "arcsinh" | "arccosh" | "arctanh"
| "atan2" | "asin" | "acos" | "atan" | "asinh" | "acosh" | "atanh"
| "exp" | "log" | "ln" | "min" | "max" | "floor" | "ceil"
!func_modifier: POW_OP
| "_"
ROOT.3: "sqrt" | "cbrt"
BIG_SYMB.3: "int" | "iint" | "iiint" | "iiiint" | "oint" | "sum" | "prod" | "lim"
LATEX: /\$.+?\$/
// Values that can be on the right-hand side of an implicit multiplication
?mvalue: VARIABLE -> val
| bvalue
| mvalue VAL_SUFFIX -> suffix
| mvalue POW_OP value -> op
| VARIABLE NUMBER -> sub
| mvalue "_" value -> sub
// Values that cannot be on the right-hand side of an implicit multiplication
?nmvalue: NUMBER -> val
| SUM_OP value -> signed_val
| nmvalue VAL_SUFFIX -> suffix
| nmvalue POW_OP value -> op
| nmvalue "_" value -> sub
// Values that are grouped together with brackets, absolute values, etc.
?bvalue.2: LATEX -> raw_latex
| "(" operation ")"? -> bracket
| "[" operation "]"? -> sbracket
| "{" operation "}"? -> cbracket
| "|" operation "|" -> abs
| FUNC_NAME [func_modifier value [func_modifier value]] value -> func
| FUNC_NAME [func_modifier value [func_modifier value]] "(" operation ("," operation)+ ")" -> func_bracket
| BIG_SYMB [func_modifier value [func_modifier value]] value -> big_symb
| BIG_SYMB [func_modifier value [func_modifier value]] -> big_symb
| ROOT value -> root
| "root" "[" operation "]" value -> nth_root
?value: mvalue
| nmvalue
?mul_op: value
| mul_op MUL_OP value -> op
| mul_op mvalue -> mul
?operation: mul_op
| operation ARW_OP mul_op -> op
| operation CMP_OP mul_op -> op
| operation SUM_OP mul_op -> op
?start: operation
"""
# Function names whose LaTeX rendering is not simply a leading backslash.
SPECIAL_FUNCS = {
    "asin": "\\arcsin",
    "acos": "\\arccos",
    "atan": "\\arctan",
    "asinh": "\\arcsinh",
    "acosh": "\\arccosh",
    "atanh": "\\arctanh",
    "atan2": "\\operatorname{arctan2}",
    "arctan2": "\\operatorname{arctan2}",
    "floor": "\\operatorname{floor}",
    "ceil": "\\operatorname{ceil}",
}
# Operator spellings that map to a dedicated LaTeX command.
# NOTE(review): "<<=" (not "<=") maps to \Leftarrow because "<=" is claimed
# by the comparison operator — this matches the ARW_OP terminal above.
SPECIAL_OPS = {
    "**": "^",
    "*": "\\cdot ",
    "%": "\\bmod ",
    ">=": "\\geq ",
    "<=": "\\leq ",
    "!=": "\\neq ",
    "->": "\\rightarrow ",
    "<-": "\\leftarrow ",
    "=>": "\\Rightarrow ",
    "<<=": "\\Leftarrow ",
    "<->": "\\leftrightarrow ",
    "<=>": "\\Leftrightarrow ",
    "-->": "\\longrightarrow ",
    "<--": "\\longleftarrow ",
    "==>": "\\Longrightarrow ",
    "<==": "\\Longleftarrow ",
    "<-->": "\\longleftrightarrow ",
    "<==>": "\\Longleftrightarrow ",
}
# Variable spellings rendered as special LaTeX symbols.
SPECIAL_SYMBS = {
    "inf": "\\infty ",
    "oo": "\\infty ",
    "...": "\\ldots ",
}
def strip_bracket(tree: lark.Tree) -> str:
    """
    Render `tree` to LaTeX, dropping its outermost bracket pair if present.
    Used wherever LaTeX supplies its own grouping (e.g. inside \\frac{...}).
    """
    if tree.data in ("bracket", "sbracket", "cbracket"):
        tree = tree.children[0]
    return tree_to_latex(tree)
def op_tree_to_latex(tree: lark.Tree) -> str:
    """
    Convert a tree whose top level is a binary operator into LaTeX.
    `^` renders as an exponent, `/` as a \\frac; everything else is emitted
    infix after translating the operator through SPECIAL_OPS.
    """
    op = tree.children[1]
    # Use the two-argument get() rather than the original `get(op) or op`,
    # which would silently discard any falsy replacement value.
    op = SPECIAL_OPS.get(op, op)
    # Preserve the original's in-place rewrite so any later pass over this
    # tree sees the translated operator.
    tree.children[1] = op
    if op == "^":
        return f"{tree_to_latex(tree.children[0])}^{{{strip_bracket(tree.children[2])}}}"
    elif op == "/":
        return f"\\frac{{{strip_bracket(tree.children[0])}}}{{{strip_bracket(tree.children[2])}}}"
    else:
        return tree_to_latex(tree.children[0]) + op + tree_to_latex(tree.children[2])
def get_modifiers(tree: lark.Tree) -> typing.Tuple[str, str, int]:
    """
    Collect the optional super/subscript modifiers attached to a node.
    Returns (first_modifier, second_modifier, index_of_first_argument);
    a modifier is None when absent.
    """
    first = None
    second = None
    arg_start = 2
    if tree.children[1] is not None:
        first = tree_to_latex(tree.children[1]) + "{" + strip_bracket(tree.children[2]) + "}"
        arg_start = 4
    if tree.children[3] is not None:
        second = tree_to_latex(tree.children[3]) + "{" + strip_bracket(tree.children[4]) + "}"
        arg_start = 5
    return first, second, arg_start
def func_tree_to_latex(tree: lark.Tree, bracket: bool) -> str:
    """
    Render a function application (e.g. sin, log, or a quoted custom name).
    With bracket=True the comma-separated arguments are wrapped in
    \\left( ... \\right); otherwise the single argument follows a space.
    """
    name = tree.children[0]
    if name.startswith("\"") and name.endswith("\""):
        # Quoted names become \operatorname{...} with the quotes stripped.
        latex = f"\\operatorname{{{name[1:-1]}}}"
    else:
        latex = SPECIAL_FUNCS.get(name, "\\" + name)
    first_mod, second_mod, arg_start = get_modifiers(tree)
    if first_mod:
        latex += first_mod
    if second_mod:
        latex += second_mod
    if bracket:
        rendered_args = ','.join(tree_to_latex(child) for child in tree.children[arg_start:])
        latex += f"\\left({rendered_args}\\right)"
    else:
        latex += " " + tree_to_latex(tree.children[arg_start])
    return latex
def root_tree_to_latex(tree: lark.Tree) -> str:
    """
    Render a root node: `sqrt` becomes \\sqrt{...}, `cbrt` \\sqrt[3]{...}.
    """
    index = "[3]" if tree.children[0] == "cbrt" else ""
    return f"\\sqrt{index}{{{strip_bracket(tree.children[1])}}}"
def big_symb_tree_to_latex(tree: lark.Tree) -> str:
    """
    Render a big-symbol node (integral, limit, sum, product) as LaTeX,
    including optional bounds and a trailing operand when present.
    """
    pieces = ["\\" + tree.children[0]]
    m1, m2, arg_start = get_modifiers(tree)
    pieces.extend(mod for mod in (m1, m2) if mod)
    # The operand is optional (e.g. a bare `sum` with no body).
    if len(tree.children) > arg_start:
        pieces.append(tree_to_latex(tree.children[arg_start]))
    return "".join(pieces)
# Dispatch table: parse-tree node type (lark.Tree.data) -> renderer that turns
# that node into LaTeX. Consulted by tree_to_latex() below.
TREE_PROCESSORS = {
    "val": lambda t: SPECIAL_SYMBS.get(t.children[0], t.children[0]),
    "func_modifier": lambda t: t.children[0],
    "raw_latex": lambda t: t.children[0][1:-1],
    "suffix": lambda t: tree_to_latex(t.children[0]) + t.children[1],
    "signed_val": lambda t: t.children[0] + tree_to_latex(t.children[1]),
    "bracket": lambda t: f"\\left({strip_bracket(t.children[0])}\\right)",
    "sbracket": lambda t: f"\\left[{strip_bracket(t.children[0])}\\right]",
    "cbracket": lambda t: f"\\left\\{{{strip_bracket(t.children[0])}\\right\\}}",
    "abs": lambda t: f"\\left|{strip_bracket(t.children[0])}\\right|",
    "func": lambda t: func_tree_to_latex(t, False),
    "func_bracket": lambda t: func_tree_to_latex(t, True),
    "big_symb": big_symb_tree_to_latex,
    "root": root_tree_to_latex,
    "nth_root": lambda t: f"\\sqrt[{strip_bracket(t.children[0])}]{{{strip_bracket(t.children[1])}}}",
    "sub": lambda t: f"{tree_to_latex(t.children[0])}_{{{tree_to_latex(t.children[1])}}}",
    "mul": lambda t: ''.join(tree_to_latex(c) for c in t.children),
    "op": op_tree_to_latex,
}
def tree_to_latex(expr: typing.Union[lark.Tree, lark.Token]) -> str:
    """
    Render a parse tree (or a bare token, returned verbatim) as LaTeX.
    """
    if isinstance(expr, lark.Token):
        return expr
    handler = TREE_PROCESSORS[expr.data]
    return handler(expr)
def str_to_latex(expr: str) -> str:
    r"""
    Convert simple, easy-to-read math expressions into LaTeX.
    E.g. `sin(sqrt(e^x + a) / 2)` becomes `\sin \left(\frac{\sqrt{e^{x}+a}}{2}\right)` in LaTeX.
    As you can see, the syntax for these expressions are designed to be very easy to read and enter compared
    to LaTeX. Most of the supported operations are pretty intuitive, such as entering a basic expression with
    brackets and arithmetic operations. Some things that may not be immediately obvious are outlined below.
    Divisions are automatically converted into fractions and will follow order of operations.
    Use brackets if you wish to make a large fraction, e.g. `(sinx+cos^2x)^2/(y+1)`.
    In expressions where brackets are unnecessary, such as fractions, powers, subscripts, etc, the outermost
    brackets will be stripped away if there is a pair. This means that in many cases you can add brackets to
    clarify exactly what you mean, without having those brackets clutter up the final output.
    The `%` operator is a modulo. The `**` operator can be used for exponentiation in place of `^`.
    There are also comparison operators, including `=`, `!=` (not equal), `>`, `<`, `>=` (greater than or
    equal to) and `<=` (less than or equal to).
    To do a function call, simply write out the function name and argument(s). Brackets are not necessary; e.g.
    both `sin x`, and `sin(x)` are valid. Common functions will be automatically recognized, e.g. sin, cos, log,
    etc. To use a custom function name, surround it with double quotes like `"func"(x)`. Function names will be
    rendered with a different font (`\operatorname` in LaTeX) compared to variables. You can also put powers and
    subscripts on them, e.g. `sin^2x`. Note that here because of order of operations only the 2 is in the power,
    and the x is left as the argument to sin.
    When implicitly multiplying a variable and a function, there needs to be a space between them. E.g. `x sinx`
    and not `xsinx`, as the latter will get parsed as a single variable. There does not need to be a space
    between the function name and its argument, even when not using brackets.
    To do a square root, cube root, or nth root use `sqrt x`, `cbrt x`, and `root[n] x` respectively.
    Note that these operations only take a single value! Use brackets if you want to take the root
    of an entire expression, e.g. `sqrt(1+1)`.
    To do an integral, limit, summation or product, use one of the following:
    - `int`, `iint`, `iiint`, `iiiint` - Integral, double integral, triple integral and quadruple integral
    - `oint` - Contour integral
    - `sum` - Summation
    - `prod` - Product
    The bounds can be specified with `_lower^upper`, e.g. `int_0^(x+1)` is an integral from 0 to x+1.
    There is also a list of various single- and double-length arrows, such as `->`, `==>`, and two-directional
    arrows such as `<->`. Note that the fat left arrow is `<<=` and not `<=`, because the latter is the
    less-than-or-equal-to sign.
    You can insert subscripts explicitly with `_`, e.g. `x_i`, or automatically by putting a number right
    after a letter, e.g. `x1`.
    You can use `inf` or `oo` for an infinity symbol and `...` for an ellipsis.
    Factorials (`!`), primes (`'`, `''`, ...) are also supported, along with square braces, curly braces and
    absolute values.
    To insert LaTeX directly into the output, surround it with $, e.g. `$\vec{v}$`.
    To insert a single LaTeX command directly into the output, enter it directly with the backslash,
    e.g. `sin\theta`.
    """
    # `parser` is the module-level lark parser defined at the bottom of the file.
    tree = parser.parse(expr)
    return tree_to_latex(tree)
parser = lark.Lark(GRAMMAR, start="start", parser="earley", lexer="standard", maybe_placeholders=True)
|
[
"tylertian123@gmail.com"
] |
tylertian123@gmail.com
|
5b0a05836beff1fd6987664da7f32378c3a4ffbe
|
61ce2d27fc36a666ac1c794c2b09a0535698f468
|
/cerach.py
|
926ecf818d7019c00470fc80c458429a0a224db2
|
[] |
no_license
|
TubuOwl/tango-bot
|
22b742e8bb24eeeaf7f7dc943dd2b2e026f3caf3
|
1ae83a587edbc1edfd3a8f2220421b559dacbf60
|
refs/heads/main
| 2023-06-07T09:20:06.505313
| 2023-05-27T05:08:19
| 2023-05-27T05:08:19
| 373,472,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49,524
|
py
|
################################################################
# Original By : Lumirayz/Lumz lumirayz@gmail.com
# Edited By : Agunq
# Use python3 because python2 is discontinued :(
# Require websocket-client module
# pip install websocket-client
################################################################
import websocket
import random
import re
import time
import urllib.request
import urllib.parse
import select
import threading
VERSION = "maddnes :s v2"
# Strategies for messages longer than the room's maximum length:
BigMessage_Multiple = 0  # split into several messages
BigMessage_Cut = 1       # truncate to the maximum length
class Struct:
    """Generic attribute bag: Struct(a=1).a == 1."""
    def __init__(self, **entries):
        self.__dict__.update(entries)
# Bit flags attached to an individual chat message.
MessageFlags = {
    "PREMIUM" : 1 << 2 ,
    "BG_ON" : 1 << 3 ,
    "MEDIA_ON" : 1 << 4 ,
    "CENSORED" : 1 << 5 ,
    "SHOW_MOD_ICON" : 1 << 6 ,
    "SHOW_STAFF_ICON" : 1 << 7 ,
    "CHANNEL_RED" : 1 << 8 ,
    "CHANNEL_ORANGE" : 1 << 9 ,
    "CHANNEL_GREEN" : 1 << 10 ,
    "CHANNEL_CYAN" : 1 << 11 ,
    "CHANNEL_BLUE" : 1 << 12 ,
    "CHANNEL_PURPLE" : 1 << 13 ,
    "CHANNEL_PINK" : 1 << 14 ,
    "CHANNEL_MOD" : 1 << 15
}
# Bit flags describing a group's (room's) configuration.
RoomFlags = {
    "LIST_TAXONOMY" : 1 << 0 ,
    "NO_ANONS" : 1 << 2 ,
    "NO_FLAGGING" : 1 << 3 ,
    "NO_COUNTER" : 1 << 4 ,
    "NO_IMAGES" : 1 << 5 ,
    "NO_LINKS" : 1 << 6 ,
    "NO_VIDEOS" : 1 << 7 ,
    "NO_STYLED_TEXT" : 1 << 8 ,
    "NO_LINKS_CHATANGO" : 1 << 9 ,
    "NO_BROADCAST_MSG_WITH_BW" : 1 << 10 ,
    "RATE_LIMIT_REGIMEON" : 1 << 11 ,
    "CHANNELS_DISABLED" : 1 << 13 ,
    "NLP_SINGLEMSG" : 1 << 14 ,
    "NLP_MSGQUEUE" : 1 << 15 ,
    "BROADCAST_MODE" : 1 << 16 ,
    "CLOSED_IF_NO_MODS" : 1 << 17 ,
    "IS_CLOSED" : 1 << 18 ,
    "SHOW_MOD_ICONS" : 1 << 19 ,
    "MODS_CHOOSE_VISIBILITY" : 1 << 20 ,
    "NLP_NGRAM" : 1 << 21 ,
    "NO_PROXIES" : 1 << 22 ,
    "HAS_XML" : 1 << 28 ,
    "UNSAFE" : 1 << 29
}
# Bit flags describing a moderator's permissions within a group.
ModeratorFlags = {
    "DELETED" : 1 << 0 ,
    "EDIT_MODS" : 1 << 1 ,
    "EDIT_MOD_VISIBILITY" : 1 << 2 ,
    "EDIT_BW" : 1 << 3 ,
    "EDIT_RESTRICTIONS" : 1 << 4 ,
    "EDIT_GROUP" : 1 << 5 ,
    "SEE_COUNTER" : 1 << 6 ,
    "SEE_MOD_CHANNEL" : 1 << 7 ,
    "SEE_MOD_ACTIONS" : 1 << 8 ,
    "EDIT_NLP" : 1 << 9 ,
    "EDIT_GP_ANNC" : 1 << 10 ,
    "EDIT_ADMINS" : 1 << 11 ,
    "EDIT_SUPERMODS" : 1 << 12 ,
    "NO_SENDING_LIMITATIONS" : 1 << 13 ,
    "SEE_IPS" : 1 << 14 ,
    "CLOSE_GROUP" : 1 << 15 ,
    "CAN_BROADCAST" : 1 << 16 ,
    "MOD_ICON_VISIBLE" : 1 << 17 ,
    "IS_STAFF" : 1 << 18 ,
    "STAFF_ICON_VISIBLE" : 1 << 19 ,
    "UNBAN_ALL" : 1 << 20
}
def getFlag(n, FlagSet):
    """
    Decode bitmask `n` into a {flag_name: bit_value} dict using `FlagSet`
    (one of MessageFlags / RoomFlags / ModeratorFlags).

    Bits set in `n` that have no name in `FlagSet` are reported under their
    stringified numeric value. `FlagSet` itself is never modified.
    """
    if n <= 0: return {}
    # Work on a copy: the original aliased FlagSet and polluted the caller's
    # (module-global) flag dict with synthetic entries.
    known = dict(FlagSet)
    known_values = set(FlagSet.values())
    # Register every unknown bit actually present in n. The original computed
    # a single bit one position above n's range, so it could never match n.
    bit = 1
    while bit <= n:
        if (n & bit) and bit not in known_values:
            known[str(bit)] = bit
        bit <<= 1
    return {flag: number for flag, number in known.items() if n & number}
# Groups pinned to a specific server number (skip the weighted hash below).
specials = {'tango-hyoo': 60,
    'monosekai': 76,
    'nico-nico': 29
}
# [server_number, weight] pairs used by getServer() to hash a group name onto
# one of chatango's sNN.chatango.com hosts.
tsweights = [['5', 75], ['6', 75], ['7', 75], ['8', 75], ['16', 75], ['17', 75], ['18', 75], ['9', 95], ['11', 95], ['12', 95], ['13', 95], ['14', 95], ['15', 95], ['19', 110], ['23', 110], ['24', 110], ['25', 110], ['26', 110], ['28', 104], ['29', 104], ['30', 104], ['31', 104], ['32', 104], ['33', 104], ['35', 101], ['36', 101], ['37', 101], ['38', 101], ['39', 101], ['40', 101], ['41', 101], ['42', 101], ['43', 101], ['44', 101], ['45', 101], ['46', 101], ['47', 101], ['48', 101], ['49', 101], ['50', 101], ['52', 110], ['53', 110], ['55', 110], ['57', 110], ['58', 110], ['59', 110], ['60', 110], ['61', 110], ['62', 110], ['63', 110], ['64', 110], ['65', 110], ['66', 110], ['68', 95], ['71', 116], ['72', 116], ['73', 116], ['74', 116], ['75', 116], ['76', 116], ['77', 116], ['78', 116], ['79', 116], ['80', 116], ['81', 116], ['82', 116], ['83', 116], ['84', 116]]
def getServer(group):
    """
    Map a group name to its chat server host ("sNN.chatango.com").

    Special-cased groups use a fixed server; everything else is hashed
    (base-36 digest of the name) onto the weighted server table.
    """
    if group in specials:
        sn = specials[group]
    else:
        group = group.replace("_", "q").replace("-", "q")
        fnv = float(int(group[0:min(5, len(group))], 36))
        lnv = group[6:(6 + min(3, len(group) - 5))]
        if lnv:
            lnv = max(float(int(lnv, 36)), 1000)
        else:
            lnv = 1000
        num = (fnv % lnv) / lnv
        maxnum = sum(weight for _, weight in tsweights)
        cumfreq = 0
        sn = 0
        # Walk the cumulative weight distribution until the hash falls inside it.
        for server, weight in tsweights:
            cumfreq += float(weight) / maxnum
            if num <= cumfreq:
                sn = int(server)
                break
    return "s" + str(sn) + ".chatango.com"
def genUid():
    """Return a random 16-digit session/user id as a string."""
    return str(random.randint(10 ** 15, 10 ** 16 - 1))
def clean_message(msg):
    """
    Strip markup from a raw room message.

    Returns (body, name_tag, font_tag): the plain-text body with the <n .../>
    and <f ...> tags removed and HTML entities decoded, plus the contents of
    those two tags (or None when absent).
    """
    n = re.search("<n(.*?)/>", msg)
    if n: n = n.group(1)
    f = re.search("<f(.*?)>", msg)
    if f: f = f.group(1)
    msg = re.sub("<n.*?/>", "", msg)
    msg = re.sub("<f.*?>", "", msg)
    msg = strip_html(msg)
    # Decode the HTML entities chatango escapes in message bodies. These
    # literals were corrupted into no-ops (and one syntax error) when the
    # source's entities got decoded; restored per the upstream ch.py library.
    msg = msg.replace("&lt;", "<")
    msg = msg.replace("&gt;", ">")
    msg = msg.replace("&quot;", "\"")
    msg = msg.replace("&apos;", "'")
    msg = msg.replace("&amp;", "&")
    return msg, n, f
def strip_html(msg):
    """Remove every HTML/XML tag from msg, keeping only the text between them."""
    return re.sub(r"</?[^>]*>", "", msg)
def parseNameColor(n):
    """Name colours arrive already usable; pass the value straight through."""
    return n
def parseFont(f):
    """
    Parse a <f> tag body like 'x11123="Arial"' into (color, face, size).

    Returns (None, None, None) when the tag cannot be parsed.
    """
    try:
        sizecolor, _ = f.split("=", 1)
        sizecolor = sizecolor.strip()
        size = int(sizecolor[1:3])
        col = sizecolor[3:6] or None  # empty colour -> None
        face = f.split("\"", 2)[1]
        return col, face, size
    except:
        # Malformed tag: any of the slicing/parsing steps above may throw.
        return None, None, None
def getAnonId(n, ssid):
    """
    Derive the 4-digit anon id (shown as !anonXXXX) from a name-tag value
    and a session id. Falls back to "NNNN" on non-numeric input.
    """
    if n is None:
        n = "5504"
    try:
        pairs = zip((int(ch) for ch in n), (int(ch) for ch in ssid[4:]))
        # Digit-wise sum, keeping only the last digit of each sum.
        return "".join(str(a + b)[-1] for a, b in pairs)
    except ValueError:
        return "NNNN"
class PM:
    """
    Connection to chatango's private-message server.

    Handles login, the contact (watch) list, the block list, presence
    tracking and sending/receiving PMs. Events are dispatched to the
    manager via _callEvent.
    """
    def __init__(self, mgr):
        self._auth_re = re.compile(r"auth\.chatango\.com ?= ?([^;]*)", re.IGNORECASE)
        self._connected = False
        self._reconnecting = False
        self._mgr = mgr
        self._auid = None
        self._premium = False
        self._blocklist = set()
        self._contacts = set()
        self._status = dict()  # User -> [last_on, state, idle]
        self._wlock = False
        self._firstCommand = True
        self._wbuf = ""
        self._wlockbuf = ""
        self._pingTask = None
        if self._mgr:
            self._connect()
    def _getAuth(self, name, password):
        """Log in over HTTP and return the auth.chatango.com cookie value, or None."""
        data = urllib.parse.urlencode({
            "user_id": name,
            "password": password,
            "storecookie": "on",
            "checkerrors": "yes"
        }).encode()
        try:
            resp = urllib.request.urlopen("https://chatango.com/login", data)
            headers = resp.headers
        except Exception:
            return None
        for header, value in headers.items():
            if header.lower() == "set-cookie":
                m = self._auth_re.search(value)
                if m:
                    auth = m.group(1)
                    if auth == "":
                        return None
                    return auth
        return None
    def _auth(self):
        """Authenticate the websocket session; returns False (and disconnects) on failure."""
        self._auid = self._getAuth(self._mgr.name, self._mgr.password)
        if self._auid == None:
            self._websock.close()
            self._callEvent("onLoginFail")
            return False
        self._sendCommand("tlogin", self._auid, "2")
        # Hold writes until the server acknowledges with "OK".
        self._setWriteLock(True)
        return True
    def connect(self):
        """Public connect entry point."""
        self._connect()
    def _connect(self):
        try:
            self._websock = websocket.WebSocket()
            self._websock.connect('ws://%s:%s/' % (self._mgr._PMHost, self._mgr._PMPort), origin='https://st.chatango.com')
            if not self._auth(): return
            self._pingTask = self.mgr.setInterval(self.mgr._pingDelay, self.ping)
        except Exception as e:
            print(e)
    def attemptReconnect(self):
        """Retry _reconnect up to self._attempt times, sleeping 10s between tries."""
        try:
            print("try reconect PM attempt %s" % (self._attempt))
            self._reconnect()
        except:
            time.sleep(10)
            self._attempt -= 1
            if self._attempt > 0:
                self.attemptReconnect()
            else:
                print("failed to reconnect PM")
                self.disconnect()
    def reconnect(self):
        """Reconnect with up to 3 attempts."""
        self._attempt = 3
        self.attemptReconnect()
    def _reconnect(self):
        self._reconnecting = True
        if self._connected:
            self._disconnect()
        self._connect()
        self._reconnecting = False
        self._callEvent("onPMReconnect")
    def disconnect(self):
        """Close the PM connection and fire onPMDisconnect."""
        self._disconnect()
        self._callEvent("onPMDisconnect")
    def _disconnect(self):
        self._connected = False
        self._pingTask.cancel()
        self._websock.close()
    def _feed(self, data):
        """Split a raw websocket frame into NUL-terminated commands and process each."""
        data = data.split(b"\x00")
        for food in data:
            # Fix: decode chatango's &#39; entity back to an apostrophe
            # (this replace had been corrupted into a no-op).
            self._process(food.decode(errors="replace").rstrip("\r\n").replace("&#39;", "'"))
    def _process(self, data):
        """Dispatch one decoded server command to its rcmd_* handler."""
        self._callEvent("onRaw", data)
        data = data.split(":")
        cmd, args = data[0], data[1:]
        func = "rcmd_" + cmd
        if hasattr(self, func):
            getattr(self, func)(args)
    def getManager(self): return self._mgr
    def getContacts(self): return self._contacts
    def getBlocklist(self): return self._blocklist
    mgr = property(getManager)
    contacts = property(getContacts)
    blocklist = property(getBlocklist)
    def rcmd_OK(self, args):
        """Login acknowledged: flush queued writes and request initial state."""
        self._connected = True
        self._setWriteLock(False)
        self._sendCommand("wl")
        self._sendCommand("getblock")
        self._sendCommand("getpremium", "1")
        self._callEvent("onPMConnect")
    def rcmd_wl(self, args):
        """Receive the full watch (contact) list: flat groups of 4 fields per user."""
        self._contacts = set()
        for i in range(len(args) // 4):
            name, last_on, is_on, idle = args[i * 4: i * 4 + 4]
            user = User(name)
            if last_on=="None":pass
            elif is_on == "off": self._status[user] = [int(last_on), "offline", 0]
            elif is_on == "app": self._status[user] = [int(last_on), "app", 0]
            elif idle == '0' and is_on == "on": self._status[user] = [int(last_on), "online", 0]
            else: self._status[user] = [int(last_on), "online", time.time() - int(idle) * 60]
            self._contacts.add(user)
        self._callEvent("onPMContactlistReceive")
    def rcmd_wladd(self, args):
        """A contact was added to the watch list; record their presence."""
        name, is_on, last_on = args
        user = User(name)
        if is_on == "on":
            if last_on == '0':
                idle = 0
            else:
                idle = time.time() - int(last_on) * 60
                last_on = 0
            is_on = "online"
        elif is_on == "app":
            if last_on == '0':
                idle = 0
            else:
                idle = time.time() - int(last_on) * 60
                last_on = 0
            is_on = "app"
        else:
            idle = 0
            if last_on == "None":
                last_on = 0
            else:
                last_on = int(last_on)
            is_on = "offline"
        self._status[user] = [last_on, is_on, idle]
        if user not in self._contacts:
            self._contacts.add(user)
            self._callEvent("onPMContactAdd", user)
    def rcmd_wldelete(self, args):
        """A contact was removed from the watch list."""
        user = User(args[0])
        if user in self._contacts:
            self._contacts.remove(user)
            self._callEvent("onPMContactRemove", user)
    def rcmd_block_list(self, args):
        """Receive the full block list."""
        self._blocklist = set()
        for name in args:
            if name == "": continue
            self._blocklist.add(User(name))
    def rcmd_idleupdate(self, args):
        """A tracked user went idle ('0') or active ('1')."""
        user = User(args[0])
        if user in self._status:
            last_on, is_on, idle = self._status[user]
            if args[1] == '1':
                self._status[user] = [last_on, is_on, 0]
            else:
                self._status[user] = [last_on, is_on, time.time()]
    def rcmd_track(self, args):
        """Presence answer for an explicit track request."""
        user = User(args[0])
        is_on = args[2]
        if is_on == "online":
            if args[1] == '0':
                idle = 0
            else:
                idle = time.time() - float(args[1]) * 60
            last_on = idle
        else:
            last_on = float(args[1])
            idle = 0
        self._status[user] = [last_on, is_on, idle]
    def rcmd_status(self, args):
        """Unsolicited presence update (same payload shape as rcmd_track)."""
        user = User(args[0])
        is_on = args[2]
        if is_on == "online":
            if args[1] == '0':
                idle = 0
            else:
                idle = time.time() - float(args[1]) * 60
            last_on = idle
        else:
            last_on = float(args[1])
            idle = 0
        self._status[user] = [last_on, is_on, idle]
    def rcmd_connect(self, args):
        """Presence payload sent on connect (same shape as rcmd_track)."""
        user = User(args[0])
        is_on = args[2]
        if is_on == "online":
            if args[1] == '0':
                idle = 0
            else:
                idle = time.time() - float(args[1]) * 60
            last_on = idle
        else:
            last_on = float(args[1])
            idle = 0
        self._status[user] = [last_on, is_on, idle]
    def rcmd_DENIED(self, args):
        """Login rejected by the server."""
        self._disconnect()
        self._callEvent("onLoginFail")
    def rcmd_msg(self, args):
        """An incoming private message."""
        user = User(args[0])
        body = strip_html(":".join(args[5:]))
        self._callEvent("onPMMessage", user, body)
    def rcmd_msgoff(self, args):
        """A private message delivered while we were offline."""
        user = User(args[0])
        body = strip_html(":".join(args[5:]))
        self._callEvent("onPMOfflineMessage", user, body)
    def rcmd_wlonline(self, args):
        """A watched contact came online."""
        user = User(args[0])
        last_on = float(args[1])
        self._status[user] = [last_on,"online",last_on]
        self._callEvent("onPMContactOnline", user)
    def rcmd_wlapp(self, args):
        """A watched contact came online via the mobile app."""
        user = User(args[0])
        last_on = float(args[1])
        self._status[user] = [last_on,"app",last_on]
        self._callEvent("onPMContactApp", user)
    def rcmd_wloffline(self, args):
        """A watched contact went offline."""
        user = User(args[0])
        last_on = float(args[1])
        self._status[user] = [last_on,"offline",0]
        self._callEvent("onPMContactOffline", user)
    def rcmd_premium(self, args):
        """Premium status answer; enable background/media when still valid."""
        if float(args[1]) > time.time():
            self._premium = True
            self.setBgMode(1)
            self.setRecordingMode(1)
        else:
            self._premium = False
    def rcmd_kickingoff(self, args):
        """Server kicked this session (logged in elsewhere)."""
        self.disconnect()
    def rcmd_toofast(self, args):
        """Rate limit tripped; the server drops us."""
        self.disconnect()
    def rcmd_unblocked(self, args):
        """Server confirmed an unblock; drop the user from the local blocklist."""
        # Fix: handlers receive the raw args list (see _process), but this one
        # was written as if it received a User, so the membership test below
        # could never succeed. Resolve the name first.
        user = User(args[0])
        if user in self._blocklist:
            self._blocklist.remove(user)
            self._callEvent("onPMUnblock", user)
    def ping(self):
        """Keep-alive: an empty command."""
        self._sendCommand("")
        self._callEvent("onPMPing")
    def message(self, user, msg):
        """Send a PM to `user`, wrapped in the manager user's font/colour markup."""
        if msg!=None:
            msg = "<n%s/><m v=\"1\"><g x%ss%s=\"%s\">%s</g></m>" % (self._mgr.user.nameColor.lower(), self._mgr.user.fontSize, self._mgr.user.fontColor.lower(), self._mgr.user.fontFace, msg)
            self._sendCommand("msg", user.name, msg)
    def addContact(self, user):
        """Add a user to the watch list."""
        if user not in self._contacts:
            self._sendCommand("wladd", user.name)
    def removeContact(self, user):
        """Remove a user from the watch list."""
        if user in self._contacts:
            self._sendCommand("wldelete", user.name)
    def block(self, user):
        """Block a user from PMing us."""
        if user not in self._blocklist:
            self._sendCommand("block", user.name, user.name, "S")
            self._blocklist.add(user)
            self._callEvent("onPMBlock", user)
    def unblock(self, user):
        """Request an unblock; the list is updated when rcmd_unblocked arrives."""
        if user in self._blocklist:
            self._sendCommand("unblock", user.name)
    def setBgMode(self, mode):
        self._sendCommand("msgbg", str(mode))
    def setRecordingMode(self, mode):
        self._sendCommand("msgmedia", str(mode))
    def setIdle(self):
        self._sendCommand("idle:0")
    def setActive(self):
        self._sendCommand("idle:1")
    def rawTrack(self, user):
        """Synchronously ask the server for a user's presence and feed the reply."""
        cmd = self._sendCommand("track" , user.name)
        self._websock.send(cmd)
        op, data = self._websock.recv_data()
        if(len(data) > 0):
            self._feed(data)
    def track(self, user):
        """Blocking presence lookup; returns [last_on, state, idle]."""
        self.rawTrack(user)
        return self._status[user]
    def checkOnline(self, user):
        """Return the cached presence state string for a user, or None."""
        if user in self._status:
            return self._status[user][1]
        else:
            return None
    def getIdle(self, user):
        """Return when the user went idle (0 if never, now if untracked-idle)."""
        if not user in self._status: return None
        if not self._status[user][1]: return 0
        if not self._status[user][2]: return time.time()
        else: return self._status[user][2]
    def _callEvent(self, evt, *args, **kw):
        """Forward an event to the manager and its generic hook."""
        getattr(self.mgr, evt)(self, *args, **kw)
        self.mgr.onEventCalled(self, evt, *args, **kw)
    def _write(self, data):
        # While write-locked, buffer separately and flush on unlock.
        if self._wlock:
            self._wlockbuf += data
        else:
            self._wbuf += data
    def _setWriteLock(self, lock):
        self._wlock = lock
        if self._wlock == False:
            self._write(self._wlockbuf)
            self._wlockbuf = ""
    def _sendCommand(self, *args):
        """Serialize a command; the first one uses a bare NUL terminator."""
        if self._firstCommand:
            terminator = "\x00"
            self._firstCommand = False
        else:
            terminator = "\r\n\x00"
        cmd = ":".join(args) + terminator
        self._write(cmd)
        return cmd
class Room:
    """
    Connection to a single chatango room (group chat): websocket session,
    message stream, user list and moderation helpers.
    """
    def __init__(self, room, uid = None, server = None, port = None, mgr = None):
        """
        Prepare state for a room connection and, when a manager is given,
        register with it and connect immediately.
        """
        self._name = room
        self._server = server or getServer(room)
        self._port = port or 8081 #1800/8080
        self._mgr = mgr
        # Write-lock buffering (same scheme as PM._write/_setWriteLock).
        self._wlock = False
        self._firstCommand = True
        self._wbuf = ""
        self._wlockbuf = ""
        self._connected = False
        self._reconnecting = False
        self._uid = uid or genUid()
        self._channel = "0"
        self._owner = None
        self._mods = dict()
        # Messages received via rcmd_b waiting for their id from rcmd_u.
        self._mqueue = dict()
        self._history = list()
        # Session id -> [User, time, puid, name, alias] for everyone present.
        self._status = dict()
        self._connectAmmount = 0
        self._premium = False
        self._userCount = 0
        self._pingTask = None
        self._users = dict()
        self._msgs = dict()
        self._silent = False
        self._banlist = dict()
        self._unbanlist = dict()
        self._bannedwords = list()
        if self._mgr:
            self._mgr._rooms[self.name] = self
            self._connect()
    def getMessage(self, mid):
        """Look up a Message by id, or None."""
        return self._msgs.get(mid)
    def createMessage(self, msgid, **kw):
        """Create (or return the cached) Message for msgid."""
        if msgid not in self._msgs:
            msg = Message(msgid = msgid, **kw)
            self._msgs[msgid] = msg
        else:
            msg = self._msgs[msgid]
        return msg
def connect(self):
self.connect()
    def _connect(self):
        """Open the websocket, authenticate, and start the keep-alive ping task."""
        self._websock = websocket.WebSocket()
        self._websock.connect('wss://%s:%s/' % (self._server, self._port), origin='https://st.chatango.com', header = { "Pragma" : "no-cache", "Cache-Control" : "no-cache" })
        self._auth()
        self._pingTask = self.mgr.setInterval(self.mgr._pingDelay, self.ping)
    def attemptReconnect(self):
        """Retry _reconnect up to self._attempt times, sleeping 10s between tries."""
        try:
            print("try reconect %s attempt %s" % (self.name, self._attempt))
            self._reconnect()
        except:
            time.sleep(10)
            self._attempt -= 1
            if self._attempt > 0:
                self.attemptReconnect()
            else:
                print("failed to reconnect %s" % self.name)
                self.disconnect()
    def reconnect(self):
        """Reconnect with up to 3 attempts."""
        self._attempt = 3
        self.attemptReconnect()
    def _reconnect(self):
        self._reconnecting = True
        if self._connected:
            self._disconnect()
        self._connect()
        self._reconnecting = False
    def disconnect(self):
        """Close the room connection and fire onDisconnect."""
        self._disconnect()
        self._callEvent("onDisconnect")
    def _disconnect(self):
        self._connected = False
        self._pingTask.cancel()
        self._websock.close()
        # Deregister from the manager unless this is a temporary reconnect.
        if not self._reconnecting:
            if self.name in self.mgr._rooms:
                del self.mgr._rooms[self.name]
    def _auth(self):
        """Send bauth with stored credentials when available, else join anonymously."""
        if self._mgr._name and self._mgr._password:
            self._sendCommand("bauth", self._name, self._uid, self._mgr._name, self._mgr._password)
        else:
            self._sendCommand("bauth", self._name, self._uid)
        # Hold further writes until rcmd_inited unlocks them.
        self._setWriteLock(True)
    def login(self, name, password=None):
        """Log in after joining; without a password this sets a temporary name."""
        if password != None:
            self._sendCommand("blogin", name, password)
        else:
            self._sendCommand("blogin", name)
    def logout(self):
        """Log out of the room (back to anonymous)."""
        self._sendCommand("blogout")
    # --- accessors backing the properties defined below ---
    def getName(self): return self._name
    def getManager(self): return self._mgr
    def getUserlist(self):
        # Distinct users currently present (including anons/temp names).
        ul = []
        for data in self._status.values():
            user = data[0]
            if user not in ul:
                ul.append(user)
        return ul
    def _getUserlist(self):
        # Like getUserlist but excludes anon ("!") and temporary ("#") names.
        ul = []
        for data in self._status.values():
            user = data[0]
            if user.name[0] not in ["!", "#"]:
                ul.append(user)
        return ul
    def getUserNames(self):
        ul = self.getUserlist()
        return list(map(lambda x: x.name, ul))
    def getOwner(self): return self._owner
    def getOwnerName(self): return self._owner.name
    def getMods(self):
        newset = set()
        for mod in self._mods.keys():
            newset.add(mod)
        return newset
    def getModNames(self):
        mods = self._mods.keys()
        newset = list()
        for x in mods:
            newset.append(x.name)
        return newset
    def getUserCount(self):
        # Prefer the server-reported count (rcmd_n); fall back to the list size.
        if self._userCount == 0:
            return len(self.getUserlist())
        else:
            return self._userCount
    def getSilent(self): return self._silent
    def setSilent(self, val): self._silent = val
    def getBanlist(self): return list(self._banlist.keys())
    def getUnBanlist(self): return [(record["target"], record["src"]) for record in self._unbanlist.values()]
    def getBannedWords(self): return self._bannedwords
    def getFlags(self): return self._flags
name = property(getName)
mgr = property(getManager)
userlist = property(getUserlist)
_userlist = property(_getUserlist)
usernames = property(getUserNames)
owner = property(getOwner)
ownername = property(getOwnerName)
mods = property(getMods)
modnames = property(getModNames)
usercount = property(getUserCount)
silent = property(getSilent, setSilent)
banlist = property(getBanlist)
unbanlist = property(getUnBanlist)
bannedwords = property(getBannedWords)
flags = property(getFlags)
def _feed(self, data):
data = data.split(b"\x00")
for food in data:
self._process(food.decode(errors="replace").rstrip("\r\n").replace("'", "'"))
def _process(self, data):
self._callEvent("onRaw", data)
data = data.split(":")
cmd, args = data[0], data[1:]
#print(cmd, args)
func = "rcmd_" + cmd
if hasattr(self, func):
getattr(self, func)(args)
    def rcmd_ok(self, args):
        """
        Join acknowledged: record owner, uid, moderators and room flags,
        and handle anonymous / name-only sessions.
        """
        self._connected = True
        self._attempt = 0
        if args[2] == "C" and self._mgr._password == None and self._mgr._name == None:
            # Fully anonymous session: derive the displayed !anon name.
            n = args[4].rsplit('.', 1)[0]
            n = n[-4:]
            pid = args[1][0:8]
            # NOTE(review): `name` is computed but never used here — verify intent.
            name = "!anon" + getAnonId(n, pid)
            self._mgr.user._nameColor = n
        elif args[2] == "C" and self._mgr._password == None:
            # Name without a password: set a temporary name.
            self.login(self._mgr._name)
        elif args[2] != "M":
            self._callEvent("onLoginFail")
            self.disconnect()
        self._owner = User(args[0])
        self._uid = args[1]
        self._aid = args[1][4:8]
        if len(args[6]) > 0:
            # args[6]: "name,flags;name,flags;..." moderator list.
            self._mods = dict((x,y) for x, y in list(map(lambda x: (User(x.split(",")[0]), getFlag(int(x.split(",")[1]), ModeratorFlags)), args[6].split(";"))))
        self._flags = getFlag(int(args[7]), RoomFlags)
        # Buffer for history messages replayed before rcmd_inited.
        self._i_log = list()
    def rcmd_groupflagsupdate(self, args):
        """Room flags changed; fire add/remove events with the delta."""
        old_flags = set(self._flags.items())
        self._flags = getFlag(int(args[0]), RoomFlags)
        new_flags = set(self._flags.items())
        add_flags = new_flags - old_flags
        if len(add_flags) > 0:
            self._callEvent("onGroupFlagsAdded", dict(add_flags))
        remove_flags = old_flags - new_flags
        if len(remove_flags) > 0:
            self._callEvent("onGroupFlagsRemoved", dict(remove_flags))
        self._callEvent("onGroupFlagsUpdate")
    def rcmd_denied(self, args):
        """Join refused by the server."""
        self._disconnect()
        self._callEvent("onConnectFail")
    def rcmd_inited(self, args):
        """
        Initial history finished: request lists, replay buffered history
        messages (oldest first) and unlock writes.
        """
        self._sendCommand("g_participants", "start")
        self._sendCommand("getpremium", "1")
        self._sendCommand("getbannedwords")
        self._sendCommand("getratelimit")
        self.requestBanlist()
        self.requestUnBanlist()
        if self._connectAmmount == 0:
            self._callEvent("onConnect")
            for msg in reversed(self._i_log):
                user = msg.user
                self._callEvent("onHistoryMessage", user, msg)
                self._addHistory(msg)
            del self._i_log
        else:
            self._callEvent("onReconnect")
        self._connectAmmount += 1
        self._setWriteLock(False)
    def rcmd_getratelimit(self, args):
        # Rate-limit info is currently ignored.
        pass
def rcmd_bw(self, args):
for word in args:
words = urllib.parse.unquote(word).split(",")
for word in words:
if word not in self._bannedwords and len(word) > 0:
self._bannedwords.append(word)
bannedwords = self._bannedwords
self._callEvent("onBannedWordsUpdated", bannedwords)
    def rcmd_premium(self, args):
        """Premium status answer; re-enable background/media when still valid."""
        if float(args[1]) > time.time():
            self._premium = True
            if self._mgr.user._mbg: self.setBgMode(1)
            if self._mgr.user._mrec: self.setRecordingMode(1)
        else:
            self._premium = False
    def rcmd_mods(self, args):
        """Moderator list changed; diff against the previous set and fire events."""
        modnames = args
        mods = dict((x, y) for x, y in list(map(lambda x: (User(x.split(",")[0]), getFlag(int(x.split(",")[1]), ModeratorFlags)), modnames)))
        curmods = mods.keys()
        premods = self._mods.keys()
        for user in curmods - premods: #modded
            self._callEvent("onModAdd", user)
        for user in premods - curmods: #demodded
            self._callEvent("onModRemove", user)
        self._callEvent("onModChange")
        self._mods = mods
    def rcmd_b(self, args):
        """
        First half of a live message: parse the payload and queue it until
        rcmd_u delivers the permanent message id.
        """
        mtime = float(args[0])
        channel = args[7]
        puid = args[3]
        ip = args[6]
        name = args[1]
        rawmsg = ":".join(args[8:])
        msg, n, f = clean_message(rawmsg)
        if name == "":
            # No login name: temporary ("#alias") or fully anonymous sender.
            nameColor = None
            name = "#" + args[2]
            if name == "#":
                name = "!anon" + getAnonId(n, puid)
        else:
            if n: nameColor = parseNameColor(n)
            else: nameColor = None
        i = args[5]
        unid = args[4]
        user = User(name)
        if puid:
            user.updatePuid(puid)
        if f: fontColor, fontFace, fontSize = parseFont(f)
        else: fontColor, fontFace, fontSize = None, None, None
        # NOTE(review): body/raw drop the first character here (msg[1:]) —
        # presumably a leading marker byte; confirm against the wire format.
        msg = Message(
            time = mtime,
            channel = channel,
            user = user,
            body = msg[1:],
            raw = rawmsg[1:],
            uid = puid,
            ip = ip,
            nameColor = nameColor,
            fontColor = fontColor,
            fontFace = fontFace,
            fontSize = fontSize,
            unid = unid,
            room = self
        )
        self._mqueue[i] = msg
def rcmd_u(self, args):
temp = Struct(**self._mqueue)
if hasattr(temp, args[0]):
msg = getattr(temp, args[0])
if msg.user != self.mgr.user:
msg.user._fontColor = msg.fontColor
msg.user._fontFace = msg.fontFace
msg.user._fontSize = msg.fontSize
msg.user._nameColor = msg.nameColor
del self._mqueue[args[0]]
msg.attach(self, args[1])
self._addHistory(msg)
self._channel = msg.channel
self._callEvent("onMessage", msg.user, msg)
    def rcmd_i(self, args):
        """
        A history message replayed during join; parsed like rcmd_b but
        created immediately (id is already known) and buffered in _i_log.
        """
        mtime = float(args[0])
        channel = args[7]
        puid = args[3]
        ip = args[6]
        if ip == "": ip = None
        name = args[1]
        rawmsg = ":".join(args[8:])
        msg, n, f = clean_message(rawmsg)
        msgid = args[5]
        if name == "":
            # No login name: temporary ("#alias") or fully anonymous sender.
            nameColor = None
            name = "#" + args[2]
            if name == "#":
                name = "!anon" + getAnonId(n, puid)
        else:
            if n: nameColor = parseNameColor(n)
            else: nameColor = None
        if f: fontColor, fontFace, fontSize = parseFont(f)
        else: fontColor, fontFace, fontSize = None, None, None
        user = User(name)
        if puid:
            user.updatePuid(puid)
        msg = self.createMessage(
            msgid = msgid,
            time = mtime,
            channel = channel,
            user = user,
            body = msg,
            raw = rawmsg,
            ip = args[6],
            unid = args[4],
            nameColor = nameColor,
            fontColor = fontColor,
            fontFace = fontFace,
            fontSize = fontSize,
            room = self
        )
        # Replayed (oldest-last) by rcmd_inited once history is complete.
        self._i_log.append(msg)
    def rcmd_g_participants(self, args):
        """
        Initial participant dump: ';'-separated records of ':'-separated
        fields (sid:time:puid:name:alias...). Populates _status.
        """
        args = ":".join(args)
        args = args.split(";")
        for data in args:
            data = data.split(":")
            sid = data[0]
            usertime = float(data[1])
            name = data[3]
            puid = data[2]
            if name.lower() == "none":
                # Not logged in: temp alias if present, otherwise derived anon name.
                n = str(int(usertime))[-4:]
                if data[4].lower() == "none":
                    name = "!anon" + getAnonId(n, puid)
                else:
                    name = "#" + data[4]
            user = User(name)
            if puid:
                user.updatePuid(puid)
            user.addSessionId(self, sid)
            if sid not in self._status:
                self._status[sid] = [user, usertime, data[2], data[3], data[4]]
    def rcmd_participant(self, args):
        """
        A single participant joined (args[0] in {'1','2'}) or left ('0');
        keep _status in sync and fire onJoin/onLeave.
        """
        name = args[3]
        sid = args[1]
        usertime = float(args[6])
        puid = args[2]
        if name.lower() == "none":
            # Not logged in: temp alias if present, otherwise derived anon name.
            n = str(int(usertime))[-4:]
            if args[4].lower() == "none":
                name = "!anon" + getAnonId(n, puid)
            else:
                name = "#" + args[4]
        user = User(name)
        if puid:
            user.updatePuid(puid)
        if args[0] == "0": #leave
            user.removeSessionId(self, sid)
            if sid in self._status:
                del self._status[sid]
            self._callEvent("onLeave", user, puid)
        if args[0] == "1" or args[0] == "2": #join
            user.addSessionId(self, sid)
            self._status[sid] = [user, usertime, args[2], args[3], args[4]]
            self._callEvent("onJoin", user, puid)
    def rcmd_show_fw(self, args):
        """Flood warning from the server."""
        self._callEvent("onFloodWarning")
    def rcmd_show_tb(self, args):
        """Temporarily banned for flooding."""
        self._callEvent("onFloodBan")
    def rcmd_tb(self, args):
        """Still flood-banned (repeat notice)."""
        self._callEvent("onFloodBanRepeat")
    def rcmd_delete(self, args):
        """A single message was deleted; drop it from history and detach it."""
        msg = self.getMessage(args[0])
        if msg:
            if msg in self._history:
                self._history.remove(msg)
                self._callEvent("onMessageDelete", msg.user, msg)
                msg.detach()
    def rcmd_deleteall(self, args):
        """A batch of messages was deleted; reuse the single-delete handler."""
        for msgid in args:
            self.rcmd_delete([msgid])
def rcmd_n(self, args):
self._userCount = int(args[0], 16)
self._callEvent("onUserCountChange")
    def rcmd_blocklist(self, args):
        """
        Receive the full ban list: ';'-separated records of
        unid:ip:target:time:src, keyed by the banned user.
        """
        self._banlist = dict()
        sections = ":".join(args).split(";")
        for section in sections:
            params = section.split(":")
            if len(params) != 5: continue
            if params[2] == "": continue
            user = User(params[2])
            self._banlist[user] = {
                "unid":params[0],
                "ip":params[1],
                "target":user,
                "time":float(params[3]),
                "src":User(params[4])
            }
        self._callEvent("onBanlistUpdate")
    def rcmd_unblocklist(self, args):
        """Receive the full unban list (same record shape as rcmd_blocklist)."""
        self._unbanlist = dict()
        sections = ":".join(args).split(";")
        for section in sections:
            params = section.split(":")
            if len(params) != 5: continue
            if params[2] == "": continue
            user = User(params[2])
            self._unbanlist[user] = {
                "unid":params[0],
                "ip":params[1],
                "target":user,
                "time":float(params[3]),
                "src":User(params[4])
            }
        self._callEvent("onUnBanlistUpdate")
    def rcmd_blocked(self, args):
        """A user was banned; record it and refresh the full ban list."""
        try:
            msg = self.getLastMessageByIp(args[1])
            if args[2] == "":
                # Anonymous target: identify via their last message's sender.
                target = msg.user
            else:
                target = User(args[2])
            user = User(args[3])
            self._banlist[target] = {"unid":args[0], "ip":args[1], "target":target, "time":float(args[4]), "src":user}
            self._callEvent("onBan", user, target)
            self.requestBanlist()
        except Exception as e:
            print(e)
    def rcmd_unblocked(self, args):
        """A user was unbanned; move them from the ban list to the unban list."""
        if args[2] == "": return
        target = User(args[2])
        user = User(args[3])
        try:
            del self._banlist[target]
        except:
            return
        # NOTE(review): keyed by the unbanning moderator (`user`) while
        # rcmd_unblocklist keys by the target — looks inconsistent; confirm.
        self._unbanlist[user] = {"unid":args[0], "ip":args[1], "target":target, "time":float(args[4]), "src":user}
        self._callEvent("onUnban", user, target)
        self.requestUnBanlist()
    def ping(self):
        """Keep-alive: an empty command."""
        self._sendCommand("")
        self._callEvent("onPing")
    def rawMessage(self, channel, msg):
        """Send a pre-escaped message with the manager user's name/font markup."""
        msg = "<n" + self._mgr.user.nameColor + "/>" + msg
        msg = "<f x%0.2i%s=\"%s\">" %(self._mgr.user.fontSize, self._mgr.user.fontColor, self._mgr.user.fontFace) + msg
        # Silent mode suppresses all outgoing messages.
        if not self._silent:
            self._sendCommand("bm", "tl2r", channel, msg)
def message(self, msg, html = False, channel = None):
    """Send *msg* to the room.

    msg     -- text to send; None is silently ignored
    html    -- when False, '<'/'>' are entity-escaped so user text cannot
               inject markup into the protocol frame
    channel -- explicit channel; defaults to the room's current one

    Over-long messages are truncated or split per the manager's
    _tooBigMessage policy.
    """
    if msg is None:
        return
    if not html:
        # Escape markup. (The previous replace("<", "<") was a no-op that
        # left the text unescaped.)
        msg = msg.replace("<", "&lt;").replace(">", "&gt;")
    if len(msg) > self.mgr._maxLength:
        if self.mgr._tooBigMessage == BigMessage_Cut:
            # html=True: already escaped above. Forward the explicit
            # channel (previously dropped) and return so the over-long
            # original is not sent as well (previously it fell through).
            self.message(msg[:self.mgr._maxLength], html = True, channel = channel)
            return
        elif self.mgr._tooBigMessage == BigMessage_Multiple:
            while len(msg) > 0:
                sect = msg[:self.mgr._maxLength]
                msg = msg[self.mgr._maxLength:]
                self.message(sect, html = True, channel = channel)
            return
    if channel is None:
        channel = self._channel
    self.rawMessage(channel, msg)
def setBgMode(self, mode):
    # Toggle background-image display for own messages (1 on, 0 off).
    self._sendCommand("msgbg", str(mode))
def setRecordingMode(self, mode):
    # Toggle media recording mode for own messages (1 on, 0 off).
    self._sendCommand("msgmedia", str(mode))
def addBadWord(self, word):
    # Owner-only (level 2): append *word* and push the full banned list.
    # NOTE(review): duplicates are not checked before appending.
    if self.getLevel(self._mgr.user) == 2:
        self._bannedwords.append(word)
        self._sendCommand("setbannedwords", "403", ", ".join(self._bannedwords))
def removeBadWord(self, word):
    """Remove *word* from the banned-word list (owner only) and push it.

    A word that is not currently banned is ignored instead of letting
    list.remove raise ValueError.
    """
    if self.getLevel(self._mgr.user) == 2 and word in self._bannedwords:
        self._bannedwords.remove(word)
        self._sendCommand("setbannedwords", "403", ", ".join(self._bannedwords))
def addMod(self, user):
    # Owner-only: promote *user* to moderator.
    if self.getLevel(self._mgr.user) == 2:
        self._sendCommand("addmod", user.name)
def removeMod(self, user):
    # Owner-only: demote moderator *user*.
    if self.getLevel(self._mgr.user) == 2:
        self._sendCommand("removemod", user.name)
def flag(self, message):
    # Report *message* to staff by id.
    self._sendCommand("g_flag", message.msgid)
def flagUser(self, user):
    """Flag *user*'s most recent message; return whether one was found."""
    last = self.getLastMessage(user)
    if not last:
        return False
    self.flag(last)
    return True
def delete(self, message):
    # Moderator or owner (level > 0): delete a single message.
    if self.getLevel(self._mgr.user) > 0:
        self._sendCommand("delmsg", message.msgid)
def rawClearUser(self, unid, ip, user):
    # Low-level: delete all messages matching a unid/ip/name triple.
    self._sendCommand("delallmsg", unid, ip, user)
def clearUser(self, user):
    """Delete every message from *user* (moderator+); return success."""
    if self.getLevel(self._mgr.user) <= 0:
        return False
    msg = self.getLastMessage(user)
    if not msg:
        return False
    # Anonymous/temporary names (prefixed '!' or '#') are sent empty.
    name = "" if user.name[0] in ("!", "#") else user.name
    self.rawClearUser(msg.unid, msg.ip, name)
    return True
def clearall(self):
    # Server-side room wipe.
    # NOTE(review): the command is sent unconditionally, even for users
    # without owner rights — confirm the server rejects it gracefully.
    self._sendCommand("clearall")
    # Moderators (level 1) fall back to clearing each history author's
    # messages one by one.
    if self.getLevel(self._mgr.user) == 1:
        for msg in self._history:
            self.clearUser(msg.user)
    self._callEvent("onClearAll")
def rawBan(self, name, ip, unid):
    # Low-level ban using the name/ip/unid triple from a user's message.
    self._sendCommand("block", unid, ip, name)
def ban(self, msg):
    """Ban the author of *msg* (requires moderator or owner level)."""
    if self.getLevel(self._mgr.user) <= 0:
        return
    # Anonymous/temporary names (prefixed '!' or '#') are sent empty.
    name = "" if msg.user.name[0] in ("!", "#") else msg.user.name
    self.rawBan(name, msg.ip, msg.unid)
def banUser(self, user):
    """Ban *user* via their most recent message; return success."""
    last = self.getLastMessage(user)
    if not last:
        return False
    self.ban(last)
    return True
def requestBanlist(self):
    # Ask the server for the first 500 ban records.
    self._sendCommand("blocklist", "block", "", "next", "500")
def requestUnBanlist(self):
    # Ask the server for the first 500 unban records.
    self._sendCommand("blocklist", "unblock", "", "next", "500")
def rawUnban(self, name, ip, unid):
    # Low-level unban by the triple stored in the ban record.
    self._sendCommand("removeblock", unid, ip, name)
def unban(self, user):
    """Lift the ban on *user*; return True if a ban record existed."""
    record = self._getBanRecord(user)
    if not record:
        return False
    self.rawUnban(record["target"].name, record["ip"], record["unid"])
    return True
def _getBanRecord(self, user):
if user in self._banlist:
return self._banlist[user]
return None
def _callEvent(self, evt, *args, **kw):
    # Dispatch *evt* to the manager's named handler, then to the
    # catch-all onEventCalled hook.
    getattr(self.mgr, evt)(self, *args, **kw)
    self.mgr.onEventCalled(self, evt, *args, **kw)
def _write(self, data):
    # Queue outgoing data. While the write lock is held, data is parked
    # in _wlockbuf and flushed when the lock is released.
    if self._wlock:
        self._wlockbuf += data
    else:
        self._wbuf += data
def _setWriteLock(self, lock):
    # Toggle the write lock; releasing it flushes everything buffered
    # while locked into the real write buffer.
    self._wlock = lock
    if self._wlock == False:
        self._write(self._wlockbuf)
        self._wlockbuf = ""
def _sendCommand(self, *args):
    # Commands are ':'-joined fields. The very first command of a session
    # is terminated by a bare NUL; all later ones by CRLF + NUL.
    if self._firstCommand:
        terminator = "\x00"
        self._firstCommand = False
    else:
        terminator = "\r\n\x00"
    self._write(":".join(args) + terminator)
def getLevel(self, user):
    """Return *user*'s permission level: 2 owner, 1 moderator, 0 regular."""
    if user == self._owner:
        return 2
    return 1 if user in self._mods else 0
def getBadge(self, msg):
    """Return 1 when *msg* carries a mod/staff icon flag, else 0."""
    flags = getFlag(int(msg.channel), MessageFlags)
    has_badge = "SHOW_MOD_ICON" in flags.keys() or "SHOW_STAFF_ICON" in flags.keys()
    return 1 if has_badge else 0
def getLastMessage(self, user = None):
    """Return the newest history message, or the newest from *user*.

    Returns None when no matching message exists.
    """
    if user:
        # Walk the history newest-first looking for this author.
        for msg in reversed(self._history):
            if msg.user == user:
                return msg
        return None
    return self._history[-1] if self._history else None
def getLastMessageByIp(self, ip = None):
    """Return the newest history message, or the newest one from *ip*.

    Returns None when no matching message exists.
    """
    if ip:
        # Walk the history newest-first looking for this ip.
        for msg in reversed(self._history):
            if msg.ip == ip:
                return msg
        return None
    return self._history[-1] if self._history else None
def findUser(self, name):
    """Find the single online user whose name contains *name*.

    The search is case-insensitive. Returns the user, or None when there
    is no match or the match is ambiguous.
    """
    needle = name.lower()
    users = self.getUserlist()
    by_name = dict(zip([u.name for u in users], users))
    candidate = None
    for uname in by_name.keys():
        if uname.find(needle) != -1:
            # A second distinct hit makes the lookup ambiguous.
            if candidate: return None
            candidate = uname
    if candidate: return by_name[candidate]
    else: return None
def _addHistory(self, msg):
    # Append to the rolling room history.
    self._history.append(msg)
    # A mod/staff badge implies the author moderates; remember them
    # locally unless they are the owner.
    if self.getBadge(msg) > 0:
        if msg.user not in self._mods.keys() and msg.user != self._owner:
            self._mods[msg.user] = {}
    # Trim to the configured length, detaching evicted messages.
    # NOTE: the loop variable below shadows the 'msg' parameter
    # (harmless, since the parameter is no longer needed).
    if len(self._history) > self.mgr._maxHistoryLength:
        rest, self._history = self._history[:-self.mgr._maxHistoryLength], self._history[-self.mgr._maxHistoryLength:]
        for msg in rest:
            msg.detach()
class RoomManager:
    """Drives every open connection (joined rooms plus the optional PM
    socket) from a single select()-based loop.

    Subclass it and override the on* callbacks to react to events; start
    with main() or the interactive easy_start() helper.
    """
    # Connection factories and tunables — overridable in subclasses.
    _Room = Room
    _PM = PM
    _PMHost = "c1.chatango.com"
    _PMPort = 8080 #1800/8080
    _pingDelay = 20
    _tooBigMessage = BigMessage_Multiple
    _maxLength = 2800
    _maxHistoryLength = 150
    def __init__(self, name = None, password = None, pm = True):
        # Account credentials; both None means an anonymous session.
        self._name = name
        self._password = password
        self._tasks = set()      # scheduled _Task objects
        self._rooms = dict()     # lowercase room name -> Room connection
        self._running = False
        # Open the private-message connection unless disabled.
        if pm:
            conn = self._PM(mgr = self)
            self._pm = conn
        else:
            self._pm = None
    def joinThread(self, room):
        # Instantiating a Room registers it with this manager.
        self._Room(room, mgr = self)
    def joinRoom(self, room):
        # Join *room* (case-insensitive) unless already connected.
        room = room.lower()
        if room not in self._rooms:
            self.joinThread(room)
    def leaveRoom(self, room):
        # Disconnect from *room* if currently joined.
        room = room.lower()
        if room in self._rooms:
            con = self._rooms[room]
            con.disconnect()
    def getRoom(self, room):
        # Return the Room connection for *room*, or None if not joined.
        room = room.lower()
        if room in self._rooms:
            return self._rooms[room]
        else:
            return None
    # Accessors, exposed below as read-only properties.
    def getUser(self): return User(self._name)
    def getName(self): return self._name
    def getPassword(self): return self._password
    def getRooms(self): return set(self._rooms.values())
    def getRoomNames(self): return set(self._rooms.keys())
    def getPM(self): return self._pm
    user = property(getUser)
    name = property(getName)
    password = property(getPassword)
    rooms = property(getRooms)
    roomnames = property(getRoomNames)
    pm = property(getPM)
    # ------------------------------------------------------------------
    # Event callbacks: no-op defaults, meant to be overridden.
    # ------------------------------------------------------------------
    def onInit(self):
        pass
    def onConnect(self, room):
        pass
    def onReconnect(self, room):
        pass
    def onConnectFail(self, room):
        pass
    def onDisconnect(self, room):
        pass
    def onLoginFail(self, room):
        pass
    def onGroupFlagsUpdate(self, room):
        pass
    def onGroupFlagsAdded(self, room, flags):
        pass
    def onGroupFlagsRemoved(self, room, flags):
        pass
    def onFloodBan(self, room):
        pass
    def onFloodBanRepeat(self, room):
        pass
    def onFloodWarning(self, room):
        pass
    def onMessageDelete(self, room, user, message):
        pass
    def onModChange(self, room):
        pass
    def onModAdd(self, room, user):
        pass
    def onClearAll(self, room):
        pass
    def onModRemove(self, room, user):
        pass
    def onMessage(self, room, user, message):
        pass
    def onBannedWordsUpdated(self, room, words):
        pass
    def onHistoryMessage(self, room, user, message):
        pass
    def onJoin(self, room, user, puid):
        pass
    def onLeave(self, room, user, puid):
        pass
    def onRaw(self, room, raw):
        pass
    def onPing(self, room):
        pass
    def onUserCountChange(self, room):
        pass
    def onBan(self, room, user, target):
        pass
    def onUnban(self, room, user, target):
        pass
    def onBanlistUpdate(self, room):
        pass
    def onUnBanlistUpdate(self, room):
        pass
    def onPMConnect(self, pm):
        pass
    def onPMReconnect(self, pm):
        pass
    def onPMDisconnect(self, pm):
        pass
    def onPMPing(self, pm):
        pass
    def onPMMessage(self, pm, user, body):
        pass
    def onPMOfflineMessage(self, pm, user, body):
        pass
    def onPMContactlistReceive(self, pm):
        pass
    def onPMBlocklistReceive(self, pm):
        pass
    def onPMContactAdd(self, pm, user):
        pass
    def onPMContactRemove(self, pm, user):
        pass
    def onPMBlock(self, pm, user):
        pass
    def onPMUnblock(self, pm, user):
        pass
    def onPMContactOnline(self, pm, user):
        pass
    def onPMContactApp(self, pm, user):
        pass
    def onPMContactOffline(self, pm, user):
        pass
    def onEventCalled(self, room, evt, *args, **kw):
        # Catch-all hook fired after every specific event.
        pass
    class _Task:
        # Handle for a scheduled callback; cancel() unschedules it.
        def cancel(self):
            self.mgr.removeTask(self)
    def _tick(self):
        # Run every due task; intervals are rescheduled, one-shots removed.
        now = time.time()
        for task in set(self._tasks):
            if task.target <= now:
                task.func(*task.args, **task.kw)
                if task.isInterval:
                    task.target = now + task.timeout
                else:
                    self._tasks.remove(task)
    def setTimeout(self, timeout, func, *args, **kw):
        # Schedule func(*args, **kw) once, *timeout* seconds from now.
        # Returns the task handle (cancel() to unschedule).
        task = self._Task()
        task.mgr = self
        task.target = time.time() + timeout
        task.timeout = timeout
        task.func = func
        task.isInterval = False
        task.args = args
        task.kw = kw
        self._tasks.add(task)
        return task
    def setInterval(self, timeout, func, *args, **kw):
        # Schedule func(*args, **kw) repeatedly, every *timeout* seconds.
        # Returns the task handle (cancel() to unschedule).
        task = self._Task()
        task.mgr = self
        task.target = time.time() + timeout
        task.timeout = timeout
        task.func = func
        task.isInterval = True
        task.args = args
        task.kw = kw
        self._tasks.add(task)
        return task
    def removeTask(self, task):
        # Unschedule a task created by setTimeout/setInterval.
        self._tasks.remove(task)
    @classmethod
    def easy_start(cl, rooms = None, name = None, password = None, pm = True):
        # Prompt interactively for anything not supplied, then run main().
        try:
            if not rooms: rooms = str(input("Room names separated by semicolons: ")).split(";")
            if len(rooms) == 1 and rooms[0] == "": rooms = []
            if name == None: name = str(input("User name: "))
            if name == "" or " " in name: name = None
            if password == None: password = str(input("User password: "))
            if password == "": password = None
            # Without credentials the session is anonymous and PM is off.
            if name == None or password == None: pm = False
            self = cl(name, password, pm = pm)
            self.rooms_copy = rooms
            if len(self.rooms_copy)>0:
                for room in self.rooms_copy:
                    self.joinRoom(room)
            self.main()
        except Exception as e:
            print(str(e))
    def main(self):
        # Event loop: multiplex every websocket with select(), feed
        # incoming frames to each connection, flush pending writes, then
        # run due timers. Any socket error triggers a reconnect.
        self.onInit()
        self._running = True
        while self._running:
            conns = list(self._rooms.values())
            if self.pm:
                conns.append(self.pm)
            socks = [x._websock.sock for x in conns if x._websock.sock != None ]
            wsocks = [x._websock.sock for x in conns if x._wbuf != "" and x._websock.sock != None]
            rd, wr, sp = select.select(socks, wsocks, [], 0.1)
            for sock in rd:
                con = [c for c in conns if c._websock.sock == sock][0]
                try:
                    op, data = con._websock.recv_data()
                    if(len(data) > 0):
                        #print(data)
                        con._feed(data)
                except Exception as e:
                    print(str(e))
                    #pass
                    con.reconnect()
            for sock in wr:
                con = [c for c in conns if c._websock.sock == sock][0]
                try:
                    size = con._websock.send(con._wbuf)
                    con._wbuf = con._wbuf[size:]
                except Exception as e:
                    print(str(e))
                    #pass
                    con.reconnect()
            self._tick()
    def stop(self):
        # Stop the loop and close every connection.
        self._running = False
        conns = list(self._rooms.values())
        if self.pm:
            conns.append(self.pm)
        for conn in conns:
            conn.disconnect()
    def enableBg(self):
        """Enable background images for the user in every joined room."""
        self.user._mbg = True
        for room in self.rooms:
            room.setBgMode(1)
    def disableBg(self):
        """Disable background images for the user in every joined room."""
        self.user._mbg = False
        for room in self.rooms:
            room.setBgMode(0)
    def enableRecording(self):
        """Enable message recording for the user in every joined room."""
        self.user._mrec = True
        for room in self.rooms:
            room.setRecordingMode(1)
    def disableRecording(self):
        """Disable message recording for the user in every joined room."""
        self.user._mrec = False
        for room in self.rooms:
            room.setRecordingMode(0)
    def setNameColor(self, color3x):
        """Set the name color (3-digit hex, e.g. 'F00')."""
        self.user._nameColor = color3x
    def setFontColor(self, color3x):
        """Set the font color (3-digit hex)."""
        self.user._fontColor = color3x
    def setFontFace(self, face):
        """Set the font face id."""
        self.user._fontFace = face
    def setFontSize(self, size):
        """Set the font size, clamped to the 9..22 range the service accepts."""
        if size < 9: size = 9
        if size > 22: size = 22
        self.user._fontSize = size
# Cache of _User instances, keyed by lowercase name.
_users = dict()

def User(name):
    """Return the canonical shared _User for *name*, creating it on first use.

    Lookups are case-insensitive so the same account always maps to one
    instance. (Identity comparison 'is None' replaces the '== None' check.)
    """
    if name is None:
        name = ""
    key = name.lower()
    user = _users.get(key)
    if user is None:
        user = _User(name)
        _users[key] = user
    return user
class _User:
def __init__(self, name):
self._name = name.lower()
self._raw = name
self._puid = ""
self._sids = dict()
self._msgs = list()
self._nameColor = "000"
self._fontSize = 12
self._fontFace = "0"
self._fontColor = "000"
self._mbg = False
self._mrec = False
def getName(self): return self._name
def getRaw(self): return self._raw
def getPuid(self): return self._puid
def getSessionIds(self, room = None):
if room:
return self._sids.get(room, set())
else:
return set.union(*self._sids.values())
def getRooms(self): return self._sids.keys()
def getRoomNames(self): return [room.name for room in self.getRooms()]
def getFontColor(self): return self._fontColor
def getFontFace(self): return self._fontFace
def getFontSize(self): return self._fontSize
def getNameColor(self): return self._nameColor
name = property(getName)
raw = property(getRaw)
puid = property(getPuid)
sids = property(getSessionIds)
rooms = property(getRooms)
roomnames = property(getRoomNames)
fontColor = property(getFontColor)
fontFace = property(getFontFace)
fontSize = property(getFontSize)
nameColor = property(getNameColor)
def addSessionId(self, room, sid):
if room not in self._sids:
self._sids[room] = set()
self._sids[room].add(sid)
def removeSessionId(self, room, sid):
try:
self._sids[room].remove(sid)
if len(self._sids[room]) == 0:
del self._sids[room]
except KeyError:
pass
def clearSessionIds(self, room):
try:
del self._sids[room]
except KeyError:
pass
def hasSessionId(self, room, sid):
try:
if sid in self._sids[room]:
return True
else:
return False
except KeyError:
return False
def updatePuid(self, puid):
self._puid = puid
def __repr__(self):
return "<User: %s>" %(self.name)
class Message:
    """One chat message with its metadata (author, colors, server ids).

    Messages start detached; attach() registers them in a room's message
    table under the server-assigned id and detach() removes them again.
    """
    def attach(self, room, msgid):
        """Bind this message to *room* under *msgid* (no-op if attached)."""
        if self._msgid is None:
            self._room = room
            self._msgid = msgid
            self._room._msgs[msgid] = self
    def detach(self):
        """Unbind this message from its room's message table."""
        if self._msgid is not None and self._msgid in self._room._msgs:
            del self._room._msgs[self._msgid]
            self._msgid = None
    def __init__(self, **kw):
        """Create a message; keyword args override the defaults below.

        A keyword whose value is None leaves the default untouched.
        """
        self._msgid = None
        self._time = None
        self._channel = None
        self._user = None
        self._body = None
        self._room = None
        self._raw = ""
        self._ip = None
        self._unid = ""
        # Default added: getUid() previously raised AttributeError whenever
        # 'uid' was not passed, since nothing else ever set _uid.
        self._uid = ""
        self._nameColor = "000"
        self._fontSize = 12
        self._fontFace = "0"
        self._fontColor = "000"
        for attr, val in kw.items():
            if val is None: continue
            setattr(self, "_" + attr, val)
    # Accessors, exposed below as read-only properties.
    def getId(self): return self._msgid
    def getTime(self): return self._time
    def getChannel(self): return self._channel
    def getUser(self): return self._user
    def getBody(self): return self._body
    def getUid(self): return self._uid
    def getIP(self): return self._ip
    def getFontColor(self): return self._fontColor
    def getFontFace(self): return self._fontFace
    def getFontSize(self): return self._fontSize
    def getNameColor(self): return self._nameColor
    def getRoom(self): return self._room
    def getRaw(self): return self._raw
    def getUnid(self): return self._unid
    msgid = property(getId)
    time = property(getTime)
    channel = property(getChannel)
    user = property(getUser)
    body = property(getBody)
    uid = property(getUid)
    room = property(getRoom)
    ip = property(getIP)
    fontColor = property(getFontColor)
    fontFace = property(getFontFace)
    fontSize = property(getFontSize)
    raw = property(getRaw)
    nameColor = property(getNameColor)
    unid = property(getUnid)
|
[
"noreply@github.com"
] |
TubuOwl.noreply@github.com
|
a8c71dd3a1929762af1b24e53fa169f9e5a1bf40
|
cbff06d328b57ef1203aa72d1a95b9a6f6247477
|
/my_first_project/settings.py
|
bc3bcfa995db56ab19fc1ea6a683f27fdbdd2ae8
|
[] |
no_license
|
forward995/django-api
|
934736eefb94569646f3288963c06cbf8c7bb035
|
d84248a932930699d2ceaaf79eda3062b948014e
|
refs/heads/master
| 2021-11-04T13:37:31.606704
| 2019-04-27T20:33:44
| 2019-04-27T20:33:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,110
|
py
|
"""
Django settings for my_first_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0e#0+#rh3g%%uerteh2xa6#-v30jd4*f#z_+t7$w@*&c-ft(5d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Install Our app here
'api.apps.ApiConfig',
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_first_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_first_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'api.User'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
)
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ROTATE_REFRESH_TOKENS': False,
'BLACKLIST_AFTER_ROTATION': True,
'ALGORITHM': 'HS256',
'SIGNING_KEY': SECRET_KEY,
'VERIFYING_KEY': None,
'AUTH_HEADER_TYPES': ('Bearer',),
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user_id',
'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
'TOKEN_TYPE_CLAIM': 'token_type',
'JTI_CLAIM': 'jti',
'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
|
[
"hyodong000@outlook.com"
] |
hyodong000@outlook.com
|
7edf943665d6fc4b762825851b1485b56daa2a86
|
5c2f520dde0cf8077facc0fcd9a92bc1a96d168b
|
/test/tests/48.py
|
701214cadabfb5fab25b77cf13ac06fbb3227046
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] |
permissive
|
nagyist/pyston
|
b613337a030ef21a3f03708febebe76cedf34c61
|
14ba2e6e6fb5c7316f66ccca86e6c6a836d96cab
|
refs/heads/master
| 2022-12-24T03:56:12.885732
| 2015-02-25T11:11:08
| 2015-02-25T11:28:13
| 31,314,596
| 0
| 0
|
NOASSERTION
| 2022-12-17T08:15:11
| 2015-02-25T13:24:41
|
Python
|
UTF-8
|
Python
| false
| false
| 113
|
py
|
# This should raise a python level error, not an assertion in the compiler
# (Pyston regression test, Python 2 syntax — note the print statement.
# The AttributeError on the missing attribute is the expected outcome.)
x = 1
y = x.doesnt_exist
print y + 1
|
[
"kevmod@gmail.com"
] |
kevmod@gmail.com
|
56a847e3692eadcd5bfcc70482b5fdd4f134c2dc
|
cdb186ad49bba1406c81f634b936e73f8cb04009
|
/ABC/166/a.py
|
9895741e925d392b5ccba894a64c79fc71a3d3af
|
[] |
no_license
|
ToshikiShimizu/AtCoder
|
9e46f5581f2c1f5149ce1394d61d652cda6256a3
|
41fe6408c20c59bbf1b5d7ee9db2e132f48ad1ac
|
refs/heads/master
| 2023-07-26T22:45:51.965088
| 2023-07-10T14:11:35
| 2023-07-10T14:11:35
| 148,154,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
import sys

# Competitive-programming template (AtCoder ABC166 A).
sys.setrecursionlimit(10**7)
INF = 10**10
MOD = 10**9 + 7

# Read the contest name and print the other one: 'ABC' <-> 'ARC'.
S = input()
print('ARC' if S == 'ABC' else 'ABC')
|
[
"tamreff3290@gmail.com"
] |
tamreff3290@gmail.com
|
51a9d301919a9c7dacb4cb5db074aa3df00179b2
|
c5347d37f7d8018c2e6161de265ed5ced7deab51
|
/budget/urls.py
|
f59d1c1f0c7f1ac13751abfb74f2d159ba9a4042
|
[
"MIT"
] |
permissive
|
davidlares/budget-webapp-django-testing
|
db3f1d5f6f90ccc357271cbe02becf31d4d38355
|
330039ba8a34e14afc96050a5cb9494380edbe84
|
refs/heads/master
| 2022-01-12T15:50:53.638288
| 2019-05-10T14:27:05
| 2019-05-10T14:27:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
from django.contrib import admin
from django.urls import path
from . import views

# URL routes for the budget app.
# NOTE(review): the 'admin' import appears unused here.
urlpatterns = [
    # Project list (home page).
    path('', views.project_list, name='list'),
    # Create a new project (class-based view).
    path('add', views.ProjectCreateView.as_view(), name="add"),
    # Project detail, addressed by slug.
    path('<slug:project_slug>', views.project_detail, name='detail')
]
|
[
"="
] |
=
|
f2780f8659ab8faa16a33e70632ee2f9b892c939
|
34212c7d0e41b3a1460ec4b852959bae4889991e
|
/2021/plot_response.py
|
98ed7dd6a2df3e830f694373bace2f2b3236eaea
|
[] |
no_license
|
JialiUMassWengLab/Misc
|
795b07b2a43455eed84bf4a0923477911c6446a6
|
0aca8cbdf975c237c1efdcb7ac763bce3db6162e
|
refs/heads/master
| 2023-08-30T12:55:53.910153
| 2023-08-25T17:33:45
| 2023-08-25T17:33:45
| 47,572,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,217
|
py
|
#! /usr/bin/python
import re
import os
import sys
import glob
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
def getData(metric):
    """Load and merge the per-study efficacy CSVs for *metric*.

    Keeps full-analysis-set rows (POP_FASFN == 1) with a known visit,
    restricts to the ADAS-cog total score, and returns only the *_DL
    (change-from-baseline) columns plus VISIT/STUDYID/ANALYSIS_WEEK,
    for non-negative analysis weeks.
    """
    files = glob.glob('/home/jzhuang/DB109/Source/*/eff_%s_nom.csv' % metric)
    array = []
    for fn in files:
        d = pd.read_csv(fn,index_col=0)
        d = d[d['POP_FASFN']==1]
        d = d.dropna(subset=['VISIT'])
        # Older files name the score column ADAS_TOT_DL instead of AVAL_DL.
        if 'PARAM' in d.columns:
            d = d[d['PARAM']=='ADAS-cog Total Score']
        else:
            d = d.rename(columns={'ADAS_TOT_DL':'AVAL_DL'})
        d = d.loc[:,list(map(lambda x: x=='VISIT' or x=='STUDYID' or x=='ANALYSIS_WEEK' or \
                             not re.search(r'_DL$',x)==None,d.columns))]
        array.append(d)
    df = pd.concat(array,join='inner',axis=0)
    df = df[df['ANALYSIS_WEEK']>=0]
    #print(df)
    return df
def getResponseTbl(metric):
    """Return a Series mapping each completing subject to their final
    AVAL_DL (change-from-baseline) value."""
    # NOTE(review): 'demo' is loaded but never used in this function.
    demo = pd.read_csv(os.path.join('/home/jzhuang/DB109/Output','patient_info.csv'),index_col=0)
    #demo.loc[:,'Treatment2'] = demo.apply(lambda x: x['Study']+' '+x['Treatment'],axis=1)
    df = getData(metric)
    #only keep subjects who finished the study
    subject_finished = list(df[(df['ANALYSIS_WEEK']==12) | (df['ANALYSIS_WEEK']==24)].index)
    df = df.loc[list(map(lambda x: x in subject_finished,df.index)),:]
    data = {}
    for subject,group in df.groupby(level=0):
        # Take the last value after sorting by week.
        # NOTE(review): Series[-1] relies on positional fallback indexing,
        # deprecated in recent pandas — confirm the pandas version pinned.
        data.update({subject: group.sort_values('ANALYSIS_WEEK')['AVAL_DL'][-1]})
    S = pd.Series(data,name='ADAS_TOT_DL')
    return S
def plotTimeResponse(metric):
    """Plot mean AVAL_DL over analysis week, one PDF page per study,
    split by treatment arm (donepezil-background subjects only)."""
    demo = pd.read_csv(os.path.join('/home/jzhuang/DB109/Output','patient_info.csv'),index_col=0)
    demo = demo[demo['TRBTYP']=='DONEPEZIL']
    #demo.loc[:,'Treatment2'] = demo.apply(lambda x: x['Study']+' '+x['Treatment'],axis=1)
    df = getData(metric)
    sns.set(font_scale=1.5,context='talk')
    from matplotlib.backends.backend_pdf import PdfPages
    pdf = PdfPages('%s_response.pdf' % metric)
    for study in df['STUDYID'].unique():
        tmp = df.dropna(axis=0,subset=['AVAL_DL'])
        tmp = tmp[tmp['STUDYID']==study]
        tmp['Subject'] = tmp.index
        #S = tmp['Subject'].value_counts()
        #print(tmp[list(map(lambda x: x in list(S[S==2].index),tmp['Subject']))])
        # Re-key on subject+week so duplicated measurements can be dropped.
        tmp.index = tmp.apply(lambda x: str(x['Subject'])+' - '+str(x['ANALYSIS_WEEK']),axis=1)
        #remove duplicated measurements
        tmp = tmp[~tmp.index.duplicated(keep='first')]
        tmp = tmp[['Subject','ANALYSIS_WEEK','AVAL_DL']].merge(demo,left_on='Subject',right_index=True,how='left')
        print(pd.crosstab(tmp['ANALYSIS_WEEK'],tmp['ARM']))
        fig,ax = plt.subplots(figsize=(20,12))
        #sns.boxplot(x='Time',y=m2,hue='Treatment2',data=tmp,width=0.6,ax=ax)
        # ci=68.47 approximates +/- 1 standard-error bands.
        sns.pointplot(x='ANALYSIS_WEEK',y='AVAL_DL',hue='ARM',data=tmp,dodge=True,ci=68.47,join=True,ax=ax)
        plt.title(study)
        plt.savefig(pdf,format='pdf')
        plt.close()
    pdf.close()
def getAUC(metric):
    """Trapezoidal AUC of AVAL_DL over weeks per subject, then z-score
    active-arm subjects against their study's placebo arm.

    Writes <metric>_AUC.csv and <metric>_AUC_zscore.csv; returns None.
    """
    df = getData(metric)
    #only keep subjects who finished the study
    subject_finished = list(df[(df['ANALYSIS_WEEK']==12) | (df['ANALYSIS_WEEK']==24)].index)
    df = df.loc[list(map(lambda x: x in subject_finished,df.index)),:]
    tmp = df.dropna(axis=0,subset=['AVAL_DL']).copy()
    tmp['Subject'] = tmp.index
    # Re-key on subject+week so duplicated measurements can be dropped.
    tmp.index = tmp.apply(lambda x: str(x['Subject'])+' - '+str(x['ANALYSIS_WEEK']),axis=1)
    #remove duplicated measurements
    tmp = tmp[~tmp.index.duplicated(keep='first')]
    AUC = {}
    for pid,group in tmp[['Subject','ANALYSIS_WEEK','AVAL_DL']].groupby('Subject'):
        group = group.sort_values('ANALYSIS_WEEK',ascending=True)
        changes = list(group['AVAL_DL'])
        # 12-week studies: carry the week-12 value forward as an endpoint.
        # NOTE(review): the loop below runs to group.shape[0]-1, so the
        # carried-forward value appears never to be used — confirm intent.
        if not 24 in list(group['ANALYSIS_WEEK']):
            changes.append(group.loc[str(pid)+' - 12.0','AVAL_DL'])
        auc = 0
        # Trapezoid rule over consecutive visits (column -2 = ANALYSIS_WEEK).
        for i in range(group.shape[0]-1):
            auc += (changes[i]+changes[i+1])/2.0*(group.iloc[i+1,-2]-group.iloc[i,-2])
        AUC.update({str(pid):auc})
    S = pd.Series(AUC,name=metric+'_AUC')
    demo = pd.read_csv(os.path.join('/home/jzhuang/DB109/Output','patient_info.csv'),index_col=0)
    demo = pd.concat([demo,S],join='inner',axis=1)
    array = []
    for study,group in demo.groupby('STUDYID'):
        # z-score each active arm against the same study's placebo arm.
        pbo_mean = np.mean(group[(group['ARM']=='PBO') | (group['ARM']=='Placebo')][metric+'_AUC'])
        pbo_std = np.std(group[(group['ARM']=='PBO') | (group['ARM']=='Placebo')][metric+'_AUC'])
        group2 = group[(group['ARM']!='PBO') & (group['ARM']!='Placebo')].copy()
        group2[metric+'_AUC_z'] = group2.apply(lambda x: (x[metric+'_AUC']-pbo_mean)/pbo_std,axis=1)
        group2['STUDYID'] = [study] * group2.shape[0]
        array.append(group2[['ARM','STUDYID',metric+'_AUC_z']])
    zDf = pd.concat(array,join='inner',axis=0)
    zDf.to_csv('/home/jzhuang/DB109/Output/%s_AUC_zscore.csv' % metric)
    print(zDf)
    demo[['ARM','STUDYID',metric+'_AUC']].to_csv('/home/jzhuang/DB109/Output/%s_AUC.csv' % metric)
def main():
    """Entry point: currently only generates the time-response plots."""
    plotTimeResponse('adas')
    #S = getResponseTbl('adas')
    #S.to_csv('/home/jzhuang/DB109/Output/response.csv')
    #getAUC('adas')
if __name__=='__main__':
main()
|
[
"jlzhuang87@gmail.com"
] |
jlzhuang87@gmail.com
|
22fc100c9cbc0c85936d6c1159b5de9e3ad51583
|
ccb328be3723d6967b6afd1831ca18f00df0b016
|
/Convert files.py
|
69b6d352f8636243cf9c91bb3ade4e3378559d50
|
[] |
no_license
|
RamachandiranVijayalakshmi/Convert-following-datetime-instance-into-string-format
|
3bbd43e7cd7dc13faefc07177100e3a8720752e3
|
f916ab000268b6f49b85d2455ddbcff6dd0f3c30
|
refs/heads/main
| 2023-01-24T14:03:45.947903
| 2020-12-12T16:47:46
| 2020-12-12T16:47:46
| 320,635,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from datetime import datetime

# Render a fixed datetime as a 'YYYY-MM-DD HH:MM:SS' string.
given_date = datetime(2020, 2, 25)
string_date = format(given_date, "%Y-%m-%d %H:%M:%S")
print(string_date)
|
[
"noreply@github.com"
] |
RamachandiranVijayalakshmi.noreply@github.com
|
620f31ff27295887ffa9e963707f744ebcfe05ea
|
cc6cf69de91f9419dd75d91da3448c244e0b5658
|
/Curso Python/ex009.py
|
e11a8e425c834148530d1f4e74a6a8f4d690673a
|
[
"MIT"
] |
permissive
|
sandro-fidelis/Cursos
|
5f9248bc400186793dec950a9edee9fb49091c5b
|
cee1960181b1309be93034694cab8cf2878e2194
|
refs/heads/main
| 2023-07-28T06:57:40.604393
| 2021-09-14T20:40:59
| 2021-09-14T20:40:59
| 328,688,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
# Print the multiplication table (1..10) for a number read from stdin.
n = int(input('Qual tabuada deseja ver: '))
print(11*'=')
for c in range(1, 11):
    print('{} x {:2} = {}'.format(n, c, c*n))
print(11*'=')
|
[
"54478235+sandro-fidelis@users.noreply.github.com"
] |
54478235+sandro-fidelis@users.noreply.github.com
|
770e7ed8f36edb31f3f1ef3dfdd713e584abaabc
|
0daeabeb24b6e90e1ffa1eaf45a5854e128812ba
|
/src/ggrc/models/associationproxy.py
|
0350f136eb1fca1e664397f44259f8305ecf40d3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
2947721120/sagacious-capsicum
|
3d3e47cb2ece4b4473895581345d7479a62e9e8d
|
671d2b4a17a1d82c7ad4ffa22a27b17dfc9addcd
|
refs/heads/master
| 2021-01-13T13:01:01.266842
| 2016-02-04T18:04:08
| 2016-02-04T18:04:08
| 47,549,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By:
# Maintained By:
from sqlalchemy.ext.associationproxy \
import association_proxy as orig_association_proxy
"""Wrapper for SQLAlchemy association proxies. Automatically add creator
function for model classes for join table associations.
"""
def resolve_class(model_class):
    """Resolve *model_class* to a model class.

    Accepts either a class object (returned unchanged) or the name of a
    class on the ggrc.models package, looked up lazily to avoid import
    cycles. (isinstance replaces the exact-type check, accepting str
    subclasses as well.)
    """
    if isinstance(model_class, str):
        import ggrc.models
        return getattr(ggrc.models, model_class)
    return model_class
def association_proxy(target_collection, attr, model_class):
    """Return an association proxy with a creator function specified.

    The creator instantiates *model_class* (resolved lazily, possibly
    from a string name) with the association attribute pre-filled.
    """
    #FIXME is model_class needed? can't that be determined off of reflection?!
    return orig_association_proxy(target_collection, attr, creator=\
        lambda arg: resolve_class(model_class)(**{
            attr: arg,
            'modified_by_id': 1, #FIXME hard-coded author id
        }))
|
[
"david@thecharboneaus.net"
] |
david@thecharboneaus.net
|
8817508835dfcbdb7a4bc238056806024829ee01
|
f9d39ab1429ca32418a766cfceaf49ed21d75abf
|
/exercise_2020_01/exercise1.py
|
ad7fa5aca991bbf48d7b01d9ff060508134d9ed6
|
[
"Apache-2.0"
] |
permissive
|
yorkshire-geek/advent_of_code_2020
|
31a8886858201a9b0359e9f9dfc6657d9afca6a6
|
6868f89849ab7347689136b010020f8cd90f2d93
|
refs/heads/main
| 2023-01-24T23:01:27.230369
| 2020-12-06T11:29:45
| 2020-12-06T11:29:45
| 317,262,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
def return_list():
    """Return the integers listed one per line in input.txt."""
    # The 'with' block closes the file itself — the original's explicit
    # close() inside the context manager was redundant.
    with open("input.txt", 'r') as input_file:
        return [int(line.strip()) for line in input_file]
if __name__ == "__main__":
list_of_ints = return_list()
list_of_ints.sort()
print("--- find 2020 with two entries---")
for first in list_of_ints:
for second in list_of_ints:
total = first + second
if total == 2020:
print(first, second)
print(first * second)
for first in list_of_ints:
for second in list_of_ints:
for third in list_of_ints:
total = first + second + third
if total == 2020:
print(first, second, third)
print(first * second * third)
|
[
"david.turner@sonocent.com"
] |
david.turner@sonocent.com
|
b53c8d83659f41af4480372772684be13c465601
|
387fc1d31252f7dce3726f26a9d64f4a868083c0
|
/samples/python/guides/images16.py
|
f90a6ffe0757c965d98e357f1a692075decc0aa8
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
gino-m/earthengine-community
|
3081e2806ce7bcb1980acfa2c04d97abd5a6efe7
|
ff3b589134b8ace62502f45b11c705098c818d8e
|
refs/heads/master
| 2022-02-20T06:56:20.473192
| 2022-02-11T00:39:05
| 2022-02-11T00:39:33
| 201,116,826
| 3
| 1
|
Apache-2.0
| 2021-06-18T20:43:12
| 2019-08-07T19:42:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
# Copyright 2020 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Earth Engine Developer's Guide examples for 'Images - Spectral transformations'."""
# [START earthengine__images16__unmixing]
# NOTE(review): 'ee', 'folium' and 'display' are assumed to be provided by
# the surrounding sample/notebook harness — confirm before running alone.
# Load a Landsat 5 image and select the bands we want to unmix.
bands = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7']
image = ee.Image('LANDSAT/LT05/C01/T1/LT05_044034_20080214').select(bands)
# Define spectral endmembers (one reflectance value per band).
urban = [88, 42, 48, 38, 86, 115, 59]
veg = [50, 21, 20, 35, 50, 110, 23]
water = [51, 20, 14, 9, 7, 116, 4]
# Unmix the image.
fractions = image.unmix([urban, veg, water])
# Define a map centered on San Francisco Bay.
map_fractions = folium.Map(location=[37.5010, -122.1899], zoom_start=10)
# Add the image layers to the map and display it.
map_fractions.add_ee_layer(
    image, {'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 128}, 'image')
map_fractions.add_ee_layer(fractions, None, 'unmixed')
display(map_fractions.add_child(folium.LayerControl()))
# [END earthengine__images16__unmixing]
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
bac404b702c7c2e55d3b5f877c3a39a6c4fce3ae
|
8d41e1c623b39f54da76d8b3c65a0e0a4fd2b551
|
/QueueCheck-requests.py
|
ea7875d0cd80078d0b9a84837aad29b1bc0b5ae5
|
[] |
no_license
|
heijigaoke/AMQ-monitoring
|
a28166dffe8e26d3360e89e1d32df6f9d048d64a
|
206d967f05edb04e55cb341ea3e4d8068d51feea
|
refs/heads/master
| 2020-06-06T23:29:36.902469
| 2018-09-25T11:19:01
| 2018-09-25T11:19:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
#!/usr/bin/python3.6
import sys
import requests
import json

# Usage: QueueCheck-requests.py <ip> <port> <queue-name> <attribute>
# where <attribute> is one of EnqueueCount / DequeueCount / ConsumerCount /
# QueueSize. Prints the attribute of an ActiveMQ queue, read via the
# Jolokia REST API exposed by the broker's web console.
#
# Rewritten: the original URL line was not a string literal ("gittp://{}..."
# without quotes or .format), used sys.argv(3] (mixed call/index syntax),
# spelled 'Queue' as 'Oueue', and the requests.get(...) call was missing a
# closing parenthesis.
url = ("http://{}:{}/api/jolokia/read/org.apache.activemq:"
       "brokerName=localhost,destinationName={},destinationType=Queue,"
       "type=Broker").format(sys.argv[1], sys.argv[2], sys.argv[3])
try:
    response = requests.get(url=url, auth=('USERNAME', 'PASSWORD'))
    result = json.loads(response.content)
    print(int(result['value']['{}'.format(sys.argv[4])]))
except Exception as a:
    print(a)
|
[
"noreply@github.com"
] |
heijigaoke.noreply@github.com
|
b417c143d2627605b42b7f8daee98f149a41f248
|
442cf473443c3c033f045d147c359445ea9c1402
|
/tonya_bot/source/settings_example.py
|
8256e57d1f93d115e06b0a966ff48f0d5345de9f
|
[] |
no_license
|
Coloboros/TONYA
|
536de45d5e0259fe2dca5204fb6c65587a9dbbb9
|
359c00d1afc66adce75c601ac5872d59dcda7ed9
|
refs/heads/main
| 2023-06-12T19:47:02.075803
| 2021-07-08T19:05:14
| 2021-07-08T19:05:14
| 378,373,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
"""Example settings for the TONYA bot -- copy this file and fill in real values."""
import os
# Telegram Bot API token (placeholder -- obtain a real one from @BotFather).
BOT_TOKEN = 'Some Telegramm bot tocken'
# Backend API base endpoint and the authentication endpoint.
SERVER_HOST = '0.0.0.0:3253/api/'
SERVER_HOST_AUTH_URL = '0.0.0.0:3253/api/user/auth'
# Scheme prepended to the hosts above when building request URLs.
SERVER_HOST_PROTOCOL = 'http'
# Directory containing this settings module.
SOURCE_PATH = os.path.split(__file__)[0]
|
[
"deposha322@mail.ru"
] |
deposha322@mail.ru
|
33b750230707d0305339feaee5ca36824a728853
|
2aaa9780fc683ee535205742ecb77c2f2e7b4c5f
|
/flask_app/s3.py
|
da4251a21f5c8fbe16bea8a2e24b867cb7bdb94e
|
[] |
no_license
|
climam/Railsbank_Mock_Card_Endpoints
|
3886ecbc3309e2828a3467964b1b910c23e43f6b
|
5d74abf86320cfd1be39d6b81fe74efd37639406
|
refs/heads/master
| 2022-11-09T22:19:41.444595
| 2020-06-26T16:07:33
| 2020-06-26T16:07:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
__author__ = "Bruce Pannaman"
import boto3
import botocore
class s3:
    """Thin boto3 wrapper for moving card JSON files in and out of S3."""

    def __init__(self):
        # Bucket and local scratch-directory configuration.
        self.bucketName = "{Your s3 bucket name}"
        self.local_filepath = "/tmp/temp_storage"
        # Low-level client handles uploads; the bucket resource handles downloads.
        self.s3client = boto3.client('s3')
        self.s3resource = boto3.resource('s3').Bucket(self.bucketName)

    def upload_to_s3(self, card_id):
        """
        Uploads a card file to s3
        :param card_id: card_id to be updated
        :return: None
        """
        local_path = "%s/%s.json" % (self.local_filepath, card_id)
        remote_key = "staging_cards/%s.json" % card_id
        self.s3client.upload_file(local_path, self.bucketName, remote_key)

    def get_file(self, card_id):
        """
        Downloads a card_id from s3.
        :param card_id: card to fetch
        :return: True on success, False on any client error
        """
        remote_key = "staging_cards/%s.json" % card_id
        local_path = "%s/%s.json" % (self.local_filepath, card_id)
        try:
            self.s3resource.download_file(remote_key, local_path)
        except botocore.exceptions.ClientError as e:
            # A 404 means the object is missing; other errors fail silently,
            # but either way the caller just sees False.
            if e.response['Error']['Code'] == "404":
                print("The object does not exist.")
            return False
        print("successfully downloaded %s.json" % card_id)
        return True
|
[
"bruce@pannaman.com"
] |
bruce@pannaman.com
|
253fa0e7077b76e305a9ee7e04125ac836321ab0
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_6/cnremm002/question4.py
|
349c4f29977ee2a2864796a3da87ca34d39ce2ff
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
"""Grade
Emmanuel Conradie
25 April 2014"""
#marks input
mark = []
marks = input("Enter a space-separated list of marks:\n")
marks = marks.split(" ")
fail = 0
third = 0
second1 = 0
second2 = 0
first = 0
#sort marks into catagories
for i in marks:
if int(i) < 50:
fail += 1
elif int(i) >=50 and int(i) < 60:
third += 1
elif int(i) >=60 and int(i) < 70:
second1 += 1
elif int(i) >=70 and int(i) < 75:
second2 += 1
elif int(i) >= 75:
first += 1
#print results
print ("1 |" + "X"*first)
print ("2+|" + "X"*second2)
print ("2-|" + "X"*second1)
print ("3 |" + "X"*third)
print ("F |" + "X"*fail)
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
c626856030bb7194c786d3b64456a7553abe90ae
|
1f2e375d85a47141d87d323d3186a576dd4a2303
|
/app/__init__.py
|
1c9601a3c93ed5001866b32ba585a0624f4eebc9
|
[] |
no_license
|
nmineev/freqspec-of-sound
|
0a1ccdcbf404f0dccc6fdae103780ae05cb9785e
|
6d145232d822d0c8c68d5de0cf0268239a05c494
|
refs/heads/master
| 2022-12-27T16:13:23.579093
| 2020-09-29T12:52:49
| 2020-09-29T12:52:49
| 299,609,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# -*- coding: utf-8 -*-
import os
from flask import Flask
from flask_bootstrap import Bootstrap
def create_app(test_config=None):
    """Application factory: build and configure the Flask app.

    :param test_config: accepted for the conventional factory signature but
        currently unused -- NOTE(review): confirm it is intentionally ignored.
    :return: the configured Flask application.
    """
    # create the app
    app = Flask(__name__, instance_relative_config=True)
    app.config["SECRET_KEY"]='dev'  # dev-only secret; override in production
    # Instantiating Bootstrap registers the extension on the app (a side
    # effect), so the otherwise-unused local must not be removed.
    bootstrap = Bootstrap(app)
    # Register the blueprint that serves the frequency-spectrum pages.
    from . import freqspec_builder
    app.register_blueprint(freqspec_builder.bp)
    return app
|
[
"nmineev98@gmail.com"
] |
nmineev98@gmail.com
|
e51ea5339c55f14d2b16346cc64c8f11eaa2d1ab
|
9f7512711f78d71a5de7ec54001411cb9c319424
|
/glc_env/bin/easy_install-3.8
|
70b3fead27c551349f4d1219a3ec7613bf5de9c9
|
[] |
no_license
|
charles-co/glc_project
|
a819ad5b401ba2279901f8f752f7a9331271d376
|
ae8c3cba6dcb416d7afa3abbbf439f48003b6e9f
|
refs/heads/main
| 2023-03-29T17:28:05.518810
| 2021-04-03T18:00:16
| 2021-04-03T18:00:16
| 336,250,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
8
|
#!/home/glc/glc_project/glc_env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim created by setuptools when the
# virtualenv was built: it simply dispatches to easy_install's main().
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the '-script.pyw'/'.exe' suffix that Windows launchers append to
    # argv[0] so easy_install sees its canonical program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"charlesboy49@gmail.com"
] |
charlesboy49@gmail.com
|
ba150da1be21589a22c729f3b45bc5850ce42ea3
|
e5f1597886b11f4a59173c86d804f5227f41f0f0
|
/bot.py
|
3cdeb6d6fed06bd79a66e8c7d193b2d9a65e6b6c
|
[] |
no_license
|
Forevka/forevka_bot_telegram
|
85abe2f314c075ee1a17ec9e3c1d411f38018de6
|
6363135b796a982512e4e38b2de650070dde4342
|
refs/heads/master
| 2021-08-31T21:23:13.731862
| 2017-12-23T00:13:39
| 2017-12-23T00:13:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,532
|
py
|
import sys, traceback, time, config, telebot,sqlite3, dbworker, random, grabing, ast, get_weather, virus_total
import wget,os
import lxml.html as lhtml
from google import search
from telebot import types
from vedis import Vedis
from calculator.simple import SimpleCalculator
from googletrans import Translator
conn = sqlite3.connect('bot_data.sqlite')
# Create a cursor -- the object that executes queries and fetches results.
cursor = conn.cursor()
bot = telebot.TeleBot(config.token)
### Reply keyboards shown for the bot's dialogue states.
# News-topic keyboard (button labels in Russian: Politics/Sport/Culture/Science).
markup = types.ReplyKeyboardMarkup()
markup.row('Политика', 'Спорт')
markup.row('Культура', 'Наука')
hide_mark=types.ReplyKeyboardRemove()
# Story-source keyboard.
markup_story=types.ReplyKeyboardMarkup()
markup_story.row('ItHappens', 'Bash.im')
# Weather keyboard; its rows are populated lazily by the /weather command.
markup_weather=types.ReplyKeyboardMarkup()
#sticker=types.get_sticker_set("Catkus")
@bot.message_handler(func=lambda message: True, commands=['help',"test","news","roll","sticker","advice","story","weather","check","music","comics"])
def check_command(message):
    """Top-level command dispatcher.

    Simple commands are answered immediately; multi-step commands
    (/news, /story, /weather, /check) switch the chat into a follow-up
    state that the state-specific handlers below then consume.
    """
    if message.text=="/help":
        bot.send_message(message.chat.id,"Я могу:\n/test - тестовая функция\n/help - показать это сообщение\n/news - покажу вам новость на выбор\n/roll - случайное число от 1 до 100\n/calc - решу вам уравнение\n/trans - переведу вам слово(через дорогу)\n/say - скажу вашу фразу\n/story - расскажу занятную историю связанную с IT\n/weather - покажу погоду за вибраною датою та часом\n/check - провірю ваш файл на віруси\n/music - відправлю вам музику сгенеровану комп'ютером\n/comics - відправлю вам комікс")
    elif message.text=="/test":
        bot.send_message(message.chat.id,"TEST TEST \nTEST TEST")
    elif message.text=="/news":
        #data = urllib2.urlopen(query)
        #reply=search('Новости', tld='ru', lang='ru', stop=1)
        bot.send_message(message.chat.id,"Про какие новости вы хотите узнать?\nТыкай, тыкай! Ну или вводи сам", reply_markup=markup)
        dbworker.set_state(message.chat.id, config.States.S_CHOOSE_THEME.value)
        #bot.send_message(message.chat.id,"Про какие новости вы хотите узнать?\n Тыкай, тыкай!", reply_markup=hide_mark)
    elif message.text=="/sticker":
        # NOTE(review): the file is opened in text mode and never closed;
        # 'rb' plus a context manager would be safer.
        img_file=open("stickers/kuku.webp")
        bot.send_document(message.chat.id,img_file)
    elif message.text=="/roll":
        bot.send_message(message.chat.id,"Случайное число 1 до 100: "+str(random.randint(1,100)))
    elif message.text=="/story":
        #
        bot.send_message(message.chat.id, "С какого сайта?", reply_markup=markup_story)
        dbworker.set_state(message.chat.id, config.States.S_GET_STORY.value)
    elif message.text=="/weather":
        # Build the date/time keyboard from the forecast entries; each entry
        # appears to be (date_time, temperature, wind speed, wind direction,
        # description) -- TODO confirm against get_weather.weather().
        reply=get_weather.weather();
        data_time=[];
        temperature=[];
        winter_speed=[];
        winter_direction=[];
        description=[];
        for i in range(0,len(reply),1):
            #print(reply[i][0])
            data_time.append(reply[i][0])
            #print(data_time[i])
            temperature.append(reply[i][1])
            winter_speed.append(reply[i][2])
            winter_direction.append(reply[i][3])
            description.append(reply[i][4])
        # NOTE(review): assumes at least 40 forecast entries (two columns
        # of 20 buttons); fewer entries would raise IndexError.
        for i in range(0,20,1):
            markup_weather.row(data_time[i],data_time[i+20])
        bot.send_message(message.chat.id, "Погода за какое время?",reply_markup=markup_weather)
        dbworker.set_state(message.chat.id, config.States.S_ENTER_DATE.value)
    elif message.text=="/check":
        bot.send_message(message.chat.id, "Відправ мені файл який потрібно перевірити на віруси")
        dbworker.set_state(message.chat.id, config.States.S_CHECK_AV.value)
    elif message.text=="/music":
        music=grabing.get_music();
        bot.send_audio(message.chat.id, audio=open(music, 'rb'))
        os.remove(music)
    elif message.text=="/comics":
        list_img=grabing.get_comics();
        bot.send_message(message.chat.id, list_img[0])
        bot.send_photo(message.chat.id, photo=open(list_img[2], 'rb'))
        bot.send_message(message.chat.id, list_img[1])
        os.remove(list_img[2])
    #elif message.text=="/calc":
    #    bot.send_message(message.chat.id,"Отправьте уравнение")
    #    dbworker.set_state(message.chat.id, config.States.S_ENTER_EXPR.value)
    #print(config.States.S_ENTER_EXPR.value)
    #elif message.text=="/sticker":
    #    bot.send_sticker(message.chat.id,)
@bot.message_handler(func=lambda message: dbworker.get_current_state(message.chat.id) == config.States.S_GET_STORY.value)
def user_story_send(message):
    """Reply with a story from the site the user picked, then reset the state."""
    site = message.text
    # Both supported sites go through the same scraper; anything else gets
    # the "unknown site" reply.
    if site in ("ItHappens", "Bash.im"):
        reply = grabing.get_story(site)
    else:
        reply = "Незнаю такого сайта :("
    dbworker.set_state(message.chat.id, config.States.S_START.value)
    bot.send_message(message.chat.id, reply, reply_markup=hide_mark)
@bot.message_handler(func=lambda message: dbworker.get_current_state(message.chat.id) == config.States.S_CHECK_AV.value, content_types=['document'])
def user_check_antivirus(message):
    """Download the user's document via the Bot API and scan it with
    VirusTotal, replying with the scan result.

    NOTE(review): unlike the other state handlers, this one never resets the
    chat back to S_START -- confirm that is intentional.
    """
    #print("AV")
    #bot.send_message(message.chat.id, message)
    file_id=message.document.file_id;
    new_file=bot.get_file(file_id)
    print(new_file)
    #test_file=urllib.URLopener()
    # Direct download URL for the file; note it embeds the bot token, which
    # the print() below exposes in the logs.
    path="https://api.telegram.org/file/bot"+config.token+"/"+new_file.file_path
    print(path)
    # wget saves the file into the current working directory.
    source=wget.download(path)
    print(source)
    reply=virus_total.virus_check(source)
    #urllib.urlretrieve("https://api.telegram.org/file/"+config.token+"/"+new_file.file_path, message.document.file_name)
    #https://api.telegram.org/file/bot<token>/<file_path>
    #new_file.download(message.document.file_name)
    bot.send_message(message.chat.id, reply)
@bot.message_handler(func=lambda message: dbworker.get_current_state(message.chat.id) == config.States.S_ENTER_DATE.value)
def user_entering_date(message):
    """Send the forecast entry whose date/time matches the user's text.

    Each entry from get_weather.weather() is indexed as
    (date_time, temperature, wind speed, wind direction, description).

    Fix: previously, when no entry matched, `reply` was still the raw
    forecast *list* and was passed to bot.send_message as-is; now an
    explicit fallback message is sent instead.
    """
    forecast = get_weather.weather()
    reply = "Незнаю такої дати :("  # fallback for an unrecognised date/time
    for entry in forecast:
        if entry[0] == message.text:
            reply = ("Дата: " + entry[0] + "\nТемпература: " + entry[1] +
                     "\nШвидкість вітру: " + entry[2] + "\nНапрям вітру: " +
                     entry[3] + "\nОпади: " + entry[4])
            break
    dbworker.set_state(message.chat.id, config.States.S_START.value)
    bot.send_message(message.chat.id, reply, reply_markup=hide_mark)
@bot.message_handler(func=lambda message: dbworker.get_current_state(message.chat.id) == config.States.S_ENTER_EXPR.value)
def user_entering_expression(message):
    """Evaluate the user's arithmetic expression and send back the result.

    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt /
    SystemExit) is narrowed to Exception, and the dead `reply = ""`
    initializer is removed (both branches assign reply).
    """
    # SECURITY: eval() on raw user input can execute arbitrary code --
    # consider a restricted evaluator (e.g. the already-imported
    # SimpleCalculator, or an ast-based parser) instead.
    try:
        reply = message.text + "=" + str(eval(message.text))
    except Exception:
        reply = "Чето пошло не так...\nВозможно делишь на ноль"
    bot.send_message(message.chat.id, reply)
    dbworker.set_state(message.chat.id, config.States.S_START.value)
@bot.message_handler(func=lambda message: dbworker.get_current_state(message.chat.id) == config.States.S_CHOOSE_THEME.value)
def user_entering_theme(message):
    """Send a news item for the topic the user chose, then reset the state."""
    news_item = grabing.find_news(message.text)
    bot.send_message(message.chat.id, news_item, reply_markup=hide_mark)
    dbworker.set_state(message.chat.id, config.States.S_START.value)
@bot.message_handler(func=lambda message: True,content_types=["text"])
def check_text(message):
    """Fallback text handler: greets on exact text, and hand-parses the
    inline commands /calc, /trans and /say by string offsets.

    NOTE(review): `message.text[0]` would raise IndexError on an empty
    text -- confirm Telegram never delivers one here.
    """
    reply="";
    if message.text=="Привет":
        bot.send_message(message.chat.id,"HI")
    if message.text[0]=="/":
        if message.text.find("calc")>0:
            #bot.send_message(message.chat.id,"Calculator")
            # Everything after "/calc " (offset 6) is the expression.
            reply=message.text[6:]
            if len(reply)>0:
                # SECURITY: eval() on raw user input can run arbitrary code.
                try:
                    reply=reply+"="+str(eval(reply))
                except ValueError:
                    reply="Что-то пошло не так"
                except ZeroDivisionError:
                    reply="Делишь на ноль"
            else:
                reply="Нету уравнения, вводи так:\n/calc 12+6"
        if message.text.find("trans")>0:
            #reply="translate"
            # "/trans xx word": chars 7-8 are the target language code,
            # everything from offset 10 is the text to translate.
            reply=message.text[10:]
            destin=message.text[7:9]
            if len(reply)>0:
                #print(destin)
                print(reply)
                translator = Translator()
                try:
                    reply=translator.translate(reply, dest=destin, src="uk")
                    reply="Переведено як: "+str(reply.text)
                except:
                    # NOTE(review): bare except -- also catches system exits.
                    reply="Чето пошло не так"
            else:
                reply="Нема слова для переводу\nВводи так: /trans мова_в_яку_переводити слово_для_переводу\nen-англійська\nru-російська\nПриклад: /trans en привіт"
        if message.text.find("say")>0:
            # Everything after "/say " (offset 4) is echoed back.
            reply=message.text[4:]
            if len(reply)<=0:
                reply="Ты не сказал что мне нужно говорить!\n/say привет. И я отправлю привет"
        bot.send_message(message.chat.id,reply)
    #else:
    #   bot.send_message(message.chat.id,"Я такого не знаю!")
    #elif message.text=="/faq":
    #    bot.send_message(message.chat.id,"I am Forevka BOT!")
def telegram_main(n):
    """Run the bot's long-polling loop; on failure, log the traceback to
    Error.Log, stop polling and back off for 10 seconds.

    :param n: unused (kept for backward compatibility with callers).
    """
    try:
        bot.polling(none_stop=True,timeout=180)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit and made Ctrl-C unable to stop the bot cleanly.
        traceback_error_string=traceback.format_exc()
        with open("Error.Log", "a") as myfile:
            myfile.write("\r\n\r\n" + time.strftime("%c")+"\r\n<<error polling="">>\r\n"+ traceback_error_string + "\r\n<<error polling="">>")
        bot.stop_polling()
        time.sleep(10)
if __name__ == '__main__':
    telegram_main(1)
|
[
"noreply@github.com"
] |
Forevka.noreply@github.com
|
a414686d76bdc2d63ea904bb126ad4fda325e8a8
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/accelbyte_py_sdk/api/cloudsave/models/models_bulk_get_admin_player_record_request.py
|
5969988affec0459d9429ac7ada139535ba14966
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,953
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# AccelByte Gaming Services Cloudsave Service (3.10.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelsBulkGetAdminPlayerRecordRequest(Model):
    """Models bulk get admin player record request (models.BulkGetAdminPlayerRecordRequest)

    Properties:
        keys: (keys) REQUIRED List[str]

    NOTE: this file is marked "Code generated. DO NOT EDIT!"; the
    create_many_from_dict fix below should also be applied to the
    ags_py_codegen generator template.
    """

    # region fields

    keys: List[str]  # REQUIRED

    # endregion fields

    # region with_x methods

    def with_keys(self, value: List[str]) -> ModelsBulkGetAdminPlayerRecordRequest:
        self.keys = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        result: dict = {}
        if hasattr(self, "keys"):
            result["keys"] = [str(i0) for i0 in self.keys]
        elif include_empty:
            result["keys"] = []
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(cls, keys: List[str], **kwargs) -> ModelsBulkGetAdminPlayerRecordRequest:
        instance = cls()
        instance.keys = keys
        return instance

    @classmethod
    def create_from_dict(
        cls, dict_: dict, include_empty: bool = False
    ) -> ModelsBulkGetAdminPlayerRecordRequest:
        instance = cls()
        if not dict_:
            return instance
        if "keys" in dict_ and dict_["keys"] is not None:
            instance.keys = [str(i0) for i0 in dict_["keys"]]
        elif include_empty:
            instance.keys = []
        return instance

    @classmethod
    def create_many_from_dict(
        cls, dict_: dict, include_empty: bool = False
    ) -> Dict[str, ModelsBulkGetAdminPlayerRecordRequest]:
        # BUG FIX: iterating a dict directly yields only its keys, so the
        # `k, v` unpacking raised ValueError; iterate over items() instead.
        return (
            {
                k: cls.create_from_dict(v, include_empty=include_empty)
                for k, v in dict_.items()
            }
            if dict_
            else {}
        )

    @classmethod
    def create_many_from_list(
        cls, list_: list, include_empty: bool = False
    ) -> List[ModelsBulkGetAdminPlayerRecordRequest]:
        return (
            [cls.create_from_dict(i, include_empty=include_empty) for i in list_]
            if list_
            else []
        )

    @classmethod
    def create_from_any(
        cls, any_: any, include_empty: bool = False, many: bool = False
    ) -> Union[
        ModelsBulkGetAdminPlayerRecordRequest,
        List[ModelsBulkGetAdminPlayerRecordRequest],
        Dict[Any, ModelsBulkGetAdminPlayerRecordRequest],
    ]:
        if many:
            if isinstance(any_, dict):
                return cls.create_many_from_dict(any_, include_empty=include_empty)
            elif isinstance(any_, list):
                return cls.create_many_from_list(any_, include_empty=include_empty)
            else:
                raise ValueError()
        else:
            return cls.create_from_dict(any_, include_empty=include_empty)

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        return {
            "keys": "keys",
        }

    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        return {
            "keys": True,
        }

    # endregion static methods
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
f36ac72326e5a95f8a19126fdfd9bf06dc8f6261
|
7e65740956db489cc46f722027af5e560912c162
|
/sweetxcyanide.py
|
85c1f7f500b78a77f3f7bce3492af27c2f61b4da
|
[
"MIT"
] |
permissive
|
ApoorvTyagi/youtube-apiv3
|
65f70ecdf33693af1be5433bb834bca9887abe6a
|
1cc419c4a113e5a584cdfa6631afce933033ad41
|
refs/heads/master
| 2023-04-06T01:27:51.008250
| 2021-04-13T15:21:43
| 2021-04-13T15:21:43
| 284,260,055
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,613
|
py
|
'''GET video statistics using youtube api v3 and update the video's metadata'''
#importing all required liberaries
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import logging
import time
from apiclient.discovery import build
# Setting Up Logging
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.INFO)
# Set up YouTube credentials
DEVELOPER_KEY = "" # Your Developer Key
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
CHANNEL_ID = "" # Put your YT channel Id here
# API-key client -- NOTE(review): immediately shadowed by the OAuth client
# built below; confirm whether this first client is still needed.
youtube = build(
    YOUTUBE_API_SERVICE_NAME,
    YOUTUBE_API_VERSION,
    developerKey=DEVELOPER_KEY,
    cache_discovery=False,
)
# OAuth scope that permits updating video metadata.
scopes = ["https://www.googleapis.com/auth/youtube.force-ssl"]
api_service_name = "youtube"
api_version = "v3"
client_secrets_file = (
    "client_secret.json"  # This json file you will get from youtube api dashboard
)
# Get credentials and create an API client
# NOTE(review): run_console() prompts interactively on every start.
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
    client_secrets_file, scopes
)
credentials = flow.run_console()
youtube = googleapiclient.discovery.build(
    api_service_name, api_version, credentials=credentials, cache_discovery=False
)
def youtubeSearch(query):
    """Search the configured channel's videos for `query`, ordered by view count."""
    logger.info("Inside youtube search")
    request = youtube.search().list(
        q=query,
        type="video",
        order="viewCount",
        part="id,snippet",
        channelId=CHANNEL_ID,
    )
    response = request.execute()
    logger.info("Search Completed...")
    print("Total results: {0}".format(response["pageInfo"]["totalResults"]))
    return response
def storeResults(response):
    """Collect per-video statistics from a search response.

    :param response: a search().list() response dict.
    :return: dict of parallel lists keyed by 'channelId', 'videoId',
        'viewCount', 'likeCount' and 'commentCount'.

    Fixes: `videoId` was never initialized (NameError on the first matching
    item), and `viewCount` was collected but missing from the returned dict
    even though getMetaData() reads results.get("viewCount").
    """
    # create variables to store your values
    channelId = []
    videoId = []
    viewCount = []
    likeCount = []
    commentCount = []
    logger.info("Storing new search results")
    for search_result in response.get("items", []):
        if (
            search_result["id"]["kind"] == "youtube#video"
            and search_result["id"]["videoId"] == ""  # Put your video id for which you want to get and store stats
        ):
            # append title and video for each item
            videoId.append(search_result["id"]["videoId"])
            # then collect stats on each video using videoId
            stats = (
                youtube.videos()
                .list(part="statistics, snippet", id=search_result["id"]["videoId"])
                .execute()
            )
            channelId.append(stats["items"][0]["snippet"]["channelId"])
            viewCount.append(stats["items"][0]["statistics"]["viewCount"])
            # Not every video has likes/dislikes enabled so they won't appear
            # in the JSON response; was a bare except, narrowed to KeyError.
            try:
                likeCount.append(stats["items"][0]["statistics"]["likeCount"])
            except KeyError:
                # Appends "Not available" to keep the parallel lists aligned.
                likeCount.append("Not available")
            # Comments may be disabled, so append 0 when absent.
            if "commentCount" in stats["items"][0]["statistics"].keys():
                commentCount.append(stats["items"][0]["statistics"]["commentCount"])
            else:
                commentCount.append(0)
    youtube_dict = {
        "channelId": channelId,
        "videoId": videoId,
        "viewCount": viewCount,
        "likeCount": likeCount,
        "commentCount": commentCount,
    }
    return youtube_dict
def getDetails():
    """Run the channel search and return the collected stats dictionary."""
    # Your YT channel name [remember: Name, not ID ;)]
    query = ""
    return storeResults(youtubeSearch(query))
def getMetaData():
    """Fetch stats for the hard-coded video and return (view_count, likes).

    NOTE(review): verify that storeResults() actually returns a "viewCount"
    key -- if it does not, results.get("viewCount") is None and the first
    lookup below raises TypeError.  Also note `comments` is printed but not
    returned.
    """
    results = getDetails()
    # Replace xyz with your same video id for which you got the stats
    view_count = results.get("viewCount")[results.get("videoId").index("xyz")]
    likes = results.get("likeCount")[results.get("videoId").index("xyz")]
    comments = results.get("commentCount")[results.get("videoId").index("xyz")]
    print(view_count, likes, comments)
    return view_count, likes
def changeTitle(new_title, desc=""):
    """Update the hard-coded video's title (and optionally its description).

    :param new_title: new title string for the video.
    :param desc: new description; previously `desc` was an undefined name,
        so every call raised NameError -- it is now a parameter defaulting
        to an empty description (backward compatible for 1-arg callers).
    """
    request = youtube.videos().update(
        part="snippet,status",
        body={
            "id": "pJMTlRzlTmc",  # hard-coded target video id
            "snippet": {
                "categoryId": 20,  # Every video has a category, for example 20 is for video games
                "defaultLanguage": "en",  # Your video's language
                "description": desc,
                "tags": [],  # Add comma seperated tags here
                "title": new_title,
            },
            "status": {"privacyStatus": "public"},  # By default privacy status is private
        },
    )
    res = request.execute()
    print(res)
oldViews = 0  # highest view count seen so far, shared across main() calls
def main():
    """Poll current stats and update the video title when views increase."""
    # `oldViews` is assigned below, so without this declaration Python
    # treats it as a local and the comparison raises UnboundLocalError.
    global oldViews
    newViews, likes = getMetaData()
    if int(newViews) > oldViews:
        logger.info("Views Increased...")
        oldViews = int(newViews)
        # (the unused `oldLikes` local was removed)
        new_title = "This video has {} views".format(newViews)
        try:
            changeTitle(new_title)
        except Exception:
            # Was a bare except; narrowed so system exits still propagate.
            logger.info("Error occured in updating title")
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
ApoorvTyagi.noreply@github.com
|
007f1bf6e621cb68ad91f330a33b674ee5f7ed80
|
720a85f9170164620b6225bd39c09cf866e0253a
|
/loops/while loop/multi-muliplication using WHILE.py
|
79146de104dee59588ecc6384356f6ed93eedf12
|
[] |
no_license
|
mourice-oduor/Python-3
|
a333d27a767e4f3a60c1d76eddca89b1f16ddc9f
|
e49f30d2aff2afb20f8989d2c296edda11500c69
|
refs/heads/master
| 2020-08-27T07:48:04.626974
| 2020-02-06T17:05:03
| 2020-02-06T17:05:03
| 217,289,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
num = int(input("Enter any number: "))
i = 1
while(i<num):
print("\n")
i = i+1
j = 1
while(j<11):
print(i,"X",j, "=", i*j)
j = j+1
|
[
"otienomaurice364@gmail.com"
] |
otienomaurice364@gmail.com
|
d8b272891bed8d7d331ec677e732e32b6fbee606
|
ccf7ca1e3eb8918426fd33b61855768a0a4e06ee
|
/app/apps/address/forms.py
|
9bb0d6a0807dc90ce6bae4bba2cb7f605f755226
|
[
"MIT"
] |
permissive
|
barisortac/mini-erp-docker
|
b4a77370d8cc10bc010756180a6c9a033276399e
|
f5c37c71384c76e029a26e89f4771a59ed02f925
|
refs/heads/master
| 2023-02-13T16:27:58.594952
| 2021-01-15T21:23:40
| 2021-01-15T21:23:40
| 322,933,618
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,022
|
py
|
# from __future__ import unicode_literals
# from dal import autocomplete
# from django import forms
# from django.utils.translation import ugettext_lazy as _t
# from django.urls import reverse
# from crispy_forms.helper import FormHelper
# from crispy_forms.layout import Layout
# from crispy_forms.layout import Fieldset
# from crispy_forms.layout import ButtonHolder
# from crispy_forms.layout import Submit
# from crispy_forms.layout import HTML
# from core.forms import CoreModelForm
# from .models import Address, District, Township, State, City
#
#
# class AddressForm(CoreModelForm):
#
# def __init__(self, *args, **kwargs):
# self.user = kwargs.pop("user")
#
# super(AddressForm, self).__init__(*args, **kwargs)
#
# self.helper = FormHelper()
# self.helper.form_method = 'post'
# self.helper.form_class = 'form-horizontal'
# self.helper.label_class = 'col-lg-2 col-md-2'
# self.helper.field_class = 'col-lg-4 col-md-4'
# # self.helper.form_show_errors = True
# self.helper.form_error_title = _t('Errors:')
#
# self.fields["district"] = forms.ModelChoiceField(
# queryset=District.objects.all(),
# label=_t("District"),
# required=False,
# widget=autocomplete.ModelSelect2(
# url='district-autocomplete',
# attrs={"data-placeholder": _t("Districts...")},
# forward=['township']
# )
# )
#
# self.fields["city"] = forms.ModelChoiceField(
# queryset=City.objects.all(),
# label=_t("City"),
# required=True,
# widget=autocomplete.ModelSelect2(
# url='city-autocomplete',
# attrs={"data-placeholder": _t("City...")},
# )
# )
#
# self.fields["township"] = forms.ModelChoiceField(
# queryset=Township.objects.all(),
# label=_t("Township"),
# required=True,
# widget=autocomplete.ModelSelect2(
# url='township-autocomplete',
# attrs={"data-placeholder": _t("Township")},
# forward=['city']
# )
# )
# self.fields["state"] = forms.ModelChoiceField(
# queryset=State.objects.all(),
# label=_t("State"),
# required=False,
# widget=autocomplete.ModelSelect2(
# url='state-autocomplete',
# attrs={"data-placeholder": _t("State")}
# )
# )
#
# # self.fields["address"] = forms.ModelChoiceField(
# # queryset=Address.objects.all(),
# # label=_t("Address"),
# # required=True,
# # widget=autocomplete.ModelSelect2(
# # url='address-autocomplete',
# # attrs={"data-placeholder": _t("Address...")},
# # )
# # )
#
# list_button = HTML(
# "<a href='{}' class='btn btn-success btt'>{}</a>".format(reverse("address-list"), _t("Address List")))
#
# if self.instance and self.instance.id:
# save_button = _t("Update")
# else:
# save_button = _t("Save")
#
# buttons = ButtonHolder(
# Submit('submit', save_button),
# list_button
# )
#
# self.helper.layout = Layout(
# Fieldset(
# _t("Address Form"),
# buttons,
# 'address_title',
# 'address',
# 'state',
# 'city',
# 'township',
# 'district',
# 'postal_code',
# 'phone',
# 'internal',
# 'fax',
# 'tax_no',
# 'tax_office'
# )
# )
#
# class Meta:
# model = Address
# exclude = ('created_at', 'created_by', 'is_deleted', 'is_active', 'deleted_at', 'deleted_by', 'data',)
|
[
"baris.ortac@hamurlabs.com"
] |
baris.ortac@hamurlabs.com
|
caed48b5b5fd31495310e9327d58a1c4d19d8ffd
|
4c252eb68446d5fd050e28a6b5ba1a7879b70b0a
|
/pyuavcan/_cli/commands/_paths.py
|
a270e2ab069d3ad6ba09df9106feaf9e9ff563e3
|
[
"MIT"
] |
permissive
|
jxltom/pyuavcan
|
ce2cdf3a95ba4c6f3a0fd8aae24b341e46481fae
|
42063b65ee2af431ab485f228d1ed5465a576449
|
refs/heads/master
| 2021-01-16T15:09:48.547764
| 2020-05-26T09:31:25
| 2020-05-26T09:31:25
| 243,163,363
| 0
| 0
|
MIT
| 2020-02-26T03:53:47
| 2020-02-26T03:53:46
| null |
UTF-8
|
Python
| false
| false
| 2,052
|
py
|
#
# Copyright (c) 2019 UAVCAN Development Team
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
import os
import sys
import pathlib
import pyuavcan
VERSION_AGNOSTIC_DATA_DIR: pathlib.Path
"""
The root directory of version-specific data directories.
Its location is platform-dependent.
It is shared for all versions of the library.
"""
# sys.getwindowsversion exists only on Windows, so this doubles as an OS check.
if hasattr(sys, 'getwindowsversion'):  # pragma: no cover
    _appdata_env = os.getenv('LOCALAPPDATA') or os.getenv('APPDATA')
    assert _appdata_env, 'Cannot determine the location of the app data directory'
    VERSION_AGNOSTIC_DATA_DIR = pathlib.Path(_appdata_env, 'UAVCAN', 'PyUAVCAN')
else:
    # POSIX systems keep everything under the user's home directory.
    VERSION_AGNOSTIC_DATA_DIR = pathlib.Path('~/.uavcan/pyuavcan').expanduser()
VERSION_SPECIFIC_DATA_DIR: pathlib.Path = \
    VERSION_AGNOSTIC_DATA_DIR / ('v' + '.'.join(map(str, pyuavcan.__version_info__[:2])))
"""
The directory specific to this version of the library where resources and files are stored.
This is always a subdirectory of :data:`VERSION_AGNOSTIC_DATA_DIR`.
The version is specified down to the minor version, ignoring the patch version (e.g, 1.1),
so that versions of the library that differ only by the patch version number will use the same directory.
This directory contains the default destination path for highly volatile or low-value files.
Having such files segregated by the library version number ensures that when the library is updated,
it will not encounter compatibility issues with older formats.
"""
OUTPUT_TRANSFER_ID_MAP_DIR: pathlib.Path = VERSION_SPECIFIC_DATA_DIR / 'output-transfer-id-maps'
"""
The path is version-specific so that we won't attempt to restore transfer-ID maps stored from another version.
"""
OUTPUT_TRANSFER_ID_MAP_MAX_AGE = 60.0  # [second]
"""
This is not a path but a related parameter so it's kept here. Files older that this are not used.
"""
DEFAULT_PUBLIC_REGULATED_DATA_TYPES_ARCHIVE_URL = \
    'https://github.com/UAVCAN/public_regulated_data_types/archive/master.zip'
|
[
"pavel.kirienko@gmail.com"
] |
pavel.kirienko@gmail.com
|
32508c5e95c812085d349826080e03e7aafd348b
|
63d3a6255f2677f9d92205d62163b9d22a74c5c7
|
/modules/dynadb/uploadhandlers.py
|
8b258c9efa46a1c569c225170a667d65fd85e717
|
[
"Apache-2.0"
] |
permissive
|
GPCRmd/GPCRmd
|
9204f39b1bfbc800b13512b316e05e54ddd8af23
|
47d7a4e71025b70e15a0f752760873249932c54e
|
refs/heads/main
| 2023-09-04T11:13:44.285629
| 2023-08-29T13:43:01
| 2023-08-29T13:43:01
| 260,036,875
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,126
|
py
|
import os
from django.core.files.uploadhandler import TemporaryFileUploadHandler, StopUpload, StopFutureHandlers
from rdkit.Chem import MolFromMolBlock, ForwardSDMolSupplier
from .customized_errors import MultipleMoleculesinSDF,InvalidMoleculeFileExtension, RequestBodyTooLarge, FileTooLarge, TooManyFiles
from django.conf import settings
from .molecule_properties_tools import MOLECULE_EXTENSION_TYPES
class TemporaryFileUploadHandlerMaxSize(TemporaryFileUploadHandler):
    """Upload handler enforcing per-file size, file-count and POST-body limits.

    When a limit is exceeded, the triggering exception is raised and then
    stored on ``self.exception`` (so the view can report it with full
    traceback context), and the upload is aborted via ``StopUpload``.
    """
    def __init__(self,request,max_size,max_files=1,*args,**kwargs):
        self.max_size = max_size  # per-file limit, in bytes
        self.max_files = max_files  # maximum number of uploaded files
        self.exception = None  # populated when a limit is hit
        self.__acum_size = 0  # bytes received so far for the current file
        self.__acum_file_num = 0  # files completed so far
        # Allow 2621440 bytes (2.5 MiB) of slack over max_size for the
        # non-file parts of the POST, unless settings override it.
        self.max_post_size = max_size + 2621440
        if hasattr(settings, 'NO_FILE_MAX_POST_SIZE'):
            if settings.NO_FILE_MAX_POST_SIZE is not None:
                self.max_post_size = max_size + settings.NO_FILE_MAX_POST_SIZE
        super(TemporaryFileUploadHandlerMaxSize, self).__init__(request,*args, **kwargs)
    def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
        """
        Use the content_length to signal whether or not this handler should be in use.
        """
        # Check the content-length header to see if we should
        # If the post is too large, reset connection.
        if content_length > self.max_post_size:
            try:
                raise RequestBodyTooLarge(self.max_post_size)
            except Exception as e:
                self.exception = e
            raise StopUpload(connection_reset=True)
    def new_file(self, *args, **kwargs):
        # Reject the file outright once the allowed file count is reached.
        if self.__acum_file_num >= self.max_files:
            try:
                raise TooManyFiles(self.max_files)
            except Exception as e:
                self.exception = e
            raise StopUpload(connection_reset=True)
        return super(TemporaryFileUploadHandlerMaxSize, self).new_file(*args, **kwargs)
    def receive_data_chunk(self,raw_data, start):
        # Track cumulative size; abort (without resetting the connection)
        # as soon as the current file exceeds max_size.
        self.__acum_size += len(raw_data)
        if self.__acum_size > self.max_size:
            try:
                raise FileTooLarge(self.max_size)
            except Exception as e:
                self.exception = e
            raise StopUpload(connection_reset=False)
        return super(TemporaryFileUploadHandlerMaxSize, self).receive_data_chunk(raw_data, start)
    def file_complete(self, *args, **kwargs):
        # Count finished files for the max_files check in new_file().
        self.__acum_file_num += 1
        return super(TemporaryFileUploadHandlerMaxSize, self).file_complete(*args, **kwargs)
class TemporaryMoleculeFileUploadHandlerMaxSize(TemporaryFileUploadHandlerMaxSize):
    """Size-limited upload handler that additionally validates molecule files.

    The file extension must map to a known molecule format.  SDF uploads
    are scanned chunk by chunk for the ``$$$$`` record delimiter: if any
    non-whitespace content follows the first complete record (i.e. the
    file contains more than one molecule), the upload is aborted with
    ``MultipleMoleculesinSDF``.
    """

    def __init__(self, request, max_size, *args, **kwargs):
        self.chunk_size = 64 * 2 ** 10
        self.filetype = None
        self.charset = None
        self.__previous_last_line = ''  # carry-over of the last partial line between chunks
        self.__end_mol_found = False    # True once a '$$$$' record terminator was seen
        self.__invalid = False
        self.__invalidtoomols = False   # True when a second molecule / trailing data is found
        super().__init__(request, max_size, *args, **kwargs)

    def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None):
        """Validate the extension, then claim the upload for this handler alone."""
        basename, ext = os.path.splitext(file_name)
        ext = ext.lower()
        ext = ext.strip('.')
        # Molecule files are always decoded as UTF-8, regardless of the
        # charset announced by the client.
        self.charset = 'utf-8'
        if ext in MOLECULE_EXTENSION_TYPES.keys():
            self.filetype = MOLECULE_EXTENSION_TYPES[ext]
        else:
            try:
                raise InvalidMoleculeFileExtension(ext=ext)
            except Exception as e:
                self.exception = e
            raise StopUpload(connection_reset=False)
        super().new_file(field_name, file_name, content_type, content_length, self.charset, content_type_extra)
        raise StopFutureHandlers()

    def receive_data_chunk(self, raw_data, start):
        """Stream-validate SDF content, then delegate storage to the base class."""
        if self.filetype == 'sdf':
            encoded_raw_data = raw_data.decode(self.charset)
            encoded_raw_data = end_of_line_normalitzation(encoded_raw_data)
            # Prepend the unterminated tail of the previous chunk so the
            # '$$$$' delimiter is detected even when split across chunks.
            block = self.__previous_last_line + encoded_raw_data
            if self.__end_mol_found and block.strip() != '':
                # Anything but whitespace after the first record means a
                # second molecule (or garbage) follows.
                self.__invalidtoomols = True
            else:
                idx = block.rfind("\n$$$$\n")
                if idx > 0:
                    self.__end_mol_found = True
                    if len(block) > 6 and block[idx + 6:].strip() == '':
                        self.__invalidtoomols = False
                    else:
                        self.__invalidtoomols = True
                elif block.startswith("$$$$\n"):
                    # BUG FIX: the original compared block[0:6] (six chars)
                    # against the five-character delimiter "$$$$\n" -- which
                    # can never match when the block is longer than five
                    # characters -- and then sliced with the stale rfind()
                    # index.  Handle a delimiter at the start of the block
                    # explicitly instead.
                    self.__end_mol_found = True
                    if block[5:].strip() == '':
                        self.__invalidtoomols = False
                    else:
                        self.__invalidtoomols = True
            if self.__invalidtoomols:
                try:
                    raise MultipleMoleculesinSDF()
                except Exception as e:
                    self.exception = e
                raise StopUpload(connection_reset=False)
            # Remember the text after the last newline for the next chunk.
            splited_block = block.rsplit(sep="\n", maxsplit=1)
            if len(splited_block) == 1:
                self.__previous_last_line = block
            else:
                self.__previous_last_line = splited_block[1]
        return super().receive_data_chunk(raw_data, start)

    def file_complete(self, *args, **kwargs):
        """Annotate the uploaded file with the detected type and charset."""
        self.file.filetype = self.filetype
        self.file.charset = self.charset
        return super().file_complete(*args, **kwargs)
def end_of_line_normalitzation(string):
    """Normalize CRLF and lone CR line endings to plain LF."""
    without_crlf = string.replace('\r\n', '\n')
    return without_crlf.replace('\r', '\n')
|
[
"adrian.garcia.recio@gmail.com"
] |
adrian.garcia.recio@gmail.com
|
1530090e1c718d363e1965265df8172db832486d
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/tyk2_input/45/45-42_MD_NVT_rerun/set_2.py
|
a785bc303fba3cc93b06dcf7f2ba4db66e95ff30
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
# Prepare thermodynamic-integration "prod_2" runs for transformation 45->42:
# for every lambda window, copy the template production input and PBS job
# script into the window directory and substitute the window's lambda value
# for the XXX placeholder via sed.
import os
# NOTE(review): 'dir' shadows the dir() builtin; harmless in this script.
dir = '/mnt/scratch/songlin3/run/tyk2/L45/MD_NVT_rerun/ti_one-step/45_42/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
# Lambda windows; each value also names its working subdirectory ("%6.5f").
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_2.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_2.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
0ef4bd56bca266a23c1257f4970f9dcf5716d062
|
2f82c81b5c6d1033d5d79a1f41a853c84669d4d4
|
/restaurant/restaurantApp/admin.py
|
8921191f30ed469e0ee6b70ff9d518def7f88c69
|
[] |
no_license
|
DarshanN1/UTSTuesdayG52021Aut
|
ba241c88126574cfdd39451468e19a2d68043966
|
345025e675f638e2d1b0a7419996e32c58d6e81a
|
refs/heads/main
| 2023-06-16T08:05:48.230774
| 2021-07-15T02:49:39
| 2021-07-15T02:49:39
| 341,414,511
| 0
| 0
| null | 2021-07-08T12:11:00
| 2021-02-23T03:22:03
|
HTML
|
UTF-8
|
Python
| false
| false
| 353
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
# Expose every restaurant model in the Django admin; registration order
# matches the original explicit register() calls.
for _model in (
    Customer,
    StaffMember,
    MenuItem,
    Category,
    OrderItem,
    Order,
    Booking,
    Table,
    Restaurant,
):
    admin.site.register(_model)
|
[
"dulyacsm15@gmail.com"
] |
dulyacsm15@gmail.com
|
95f997d730d4349f34e763fa45584ee59e322956
|
1b120be95adf8003570164fe05950691b5d5838e
|
/venpo/cli.py
|
01d4c7b8b1bb367fbd7c2ed34b01d8fdaa1061c5
|
[
"MIT"
] |
permissive
|
mfdeux/venpo
|
19949256a5919f7a083d0d08256f3e12261d821c
|
3afac802d81540d9833b8216744e9823188c39f2
|
refs/heads/master
| 2022-11-24T05:18:27.235837
| 2020-08-03T16:09:45
| 2020-08-03T16:09:45
| 284,719,035
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
import click
from .main import USER_AGENT_FILE, load_user_agents, save_data, scrape_profile
# Command-line entry point: scrape a Venmo profile's transactions and
# persist them to disk under --filename.
@click.command()
@click.argument('username')
@click.option('--filename', default='venmo-txn', help='Filename of which to save scraped data')
def cli(username: str, filename: str):
"""
Extract Venmo transactions from a profile with one command
"""
# Load the pool of user agents used for the scraping requests.
user_agents = load_user_agents(USER_AGENT_FILE)
# NOTE(review): presumably scrape_profile returns a sequence of
# transactions (len() is taken below) -- confirm against main.scrape_profile.
profile_data = scrape_profile(username, user_agents)
click.echo(f"Extracted {len(profile_data)} transactions for username '{username}'")
save_data(profile_data, filename)
if __name__ == '__main__':
cli()
|
[
"mrfxyz567@gmail.com"
] |
mrfxyz567@gmail.com
|
fc8d7a1cc3753284ca0a7d7e33b64bc819d8206e
|
0d4742b4393e5a5adb779d10fd9473dc8b3d9cd4
|
/LoginByEmail/wsgi.py
|
10ce9fc8eb87d0879491ed238bc35a3534ddd939
|
[] |
no_license
|
arjuntheprogrammer/LoginByEmailDjango
|
589dbd3cf584c8d52e2082e398d96763d8171ea3
|
313e4028cd37feeda54b9af3a7b364071d86dbb3
|
refs/heads/master
| 2020-04-02T02:26:28.452725
| 2018-10-22T19:09:05
| 2018-10-22T19:09:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for LoginByEmail project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LoginByEmail.settings')
# Module-level WSGI callable imported by WSGI servers.
application = get_wsgi_application()
|
[
"saimzn4241@gmail.com"
] |
saimzn4241@gmail.com
|
ae6f820584750c60b673c130ec3650336ee08996
|
1927778b40057958bca95fee27d17e7ab4fbfed7
|
/demucs/grids/mdx_extra.py
|
e32f7cd7538de2ed2ab376e69c135ba886dd6eb8
|
[
"MIT"
] |
permissive
|
ishine/demucs
|
c91a3a745629f9d362344f61bd7444ce7cf3ef12
|
e1f2ed2963f1373fb5e66f4079fc7d4f544e128e
|
refs/heads/master
| 2022-12-02T06:58:21.238933
| 2022-11-17T15:50:07
| 2022-11-17T15:50:07
| 360,808,025
| 0
| 0
|
MIT
| 2021-04-23T08:00:10
| 2021-04-23T08:00:10
| null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
# Copyright (c) Meta, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Main training for the Track A MDX models.
"""
from ._explorers import MyExplorer
from ..train import main
# Experiment signatures used as starting points for the runs below.
TRACK_B = ['e51eebcc', 'a1d90b5c', '5d2d6c55', 'cfa93e08']
@MyExplorer
def explorer(launcher):
"""Schedule the first-round MDX Track A trainings on SLURM (8 GPUs, 3 days)."""
launcher.slurm_(
gpus=8,
time=3 * 24 * 60,
partition='learnlab')
# Reproduce results from MDX competition Track A
# This trains the first round of models. Once this is trained,
# you will need to schedule `mdx_refine`.
for sig in TRACK_B:
# Follow the continue_from chain; after this loop `xp` holds the first
# experiment of the chain (the one whose continue_from is None).
while sig is not None:
xp = main.get_xp_from_sig(sig)
sig = xp.cfg.continue_from
for dset in ['extra44', 'extra_test']:
sub = launcher.bind(xp.argv, dset=dset)
sub()
if dset == 'extra_test':
# Extra runs overriding the quant.diffq setting.
sub({'quant.diffq': 1e-4})
sub({'quant.diffq': 3e-4})
|
[
"alexandre.defossez@gmail.com"
] |
alexandre.defossez@gmail.com
|
065486e3d651931890313eff71b6e5aa7e66e103
|
93a10a77cfed19f6f43987d5f7333c7599990ab1
|
/vpy27/Lib/site-packages/cms/tests/test_site.py
|
9e3c28f77b6ff413a8fcfd0f6da48a13ebf220be
|
[] |
no_license
|
zfanai/vpy27
|
8cd00a49cadccd462276f685dfa30d51cfdfe3d6
|
57ae83d393c569cb632b1ad0bb093a13851e10ed
|
refs/heads/master
| 2021-07-15T20:25:41.383490
| 2017-10-21T02:18:50
| 2017-10-21T02:18:50
| 107,623,345
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,915
|
py
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import copy
from django.contrib.sites.models import Site
from cms.api import create_page
from cms.models import Page, Placeholder
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE
from cms.utils import get_cms_setting
from cms.utils.urlutils import admin_reverse
class SiteTestCase(CMSTestCase):
"""Site framework specific test cases.
All stuff which is changing settings.SITE_ID for tests should come here.
"""
# Creates two additional Site objects (pk=2, pk=3) and enters a login
# context for a test user; tearDown exits that context.
def setUp(self):
self.assertEqual(Site.objects.all().count(), 1)
with self.settings(SITE_ID=1):
u = self._create_user("test", True, True)
# setup sites
self.site2 = Site.objects.create(domain="sample2.com", name="sample2.com", pk=2)
self.site3 = Site.objects.create(domain="sample3.com", name="sample3.com", pk=3)
self._login_context = self.login_user_context(u)
self._login_context.__enter__()
def tearDown(self):
self._login_context.__exit__(None, None, None)
# Verifies that Page.objects.on_site()/drafts() filter per site, both with
# an explicit site argument and via the active SITE_ID setting.
def test_site_framework(self):
#Test the site framework, and test if it's possible to disable it
with self.settings(SITE_ID=self.site2.pk):
create_page("page_2a", "nav_playground.html", "de", site=self.site2)
response = self.client.get("%s?site__exact=%s" % (URL_CMS_PAGE, self.site3.pk))
self.assertEqual(response.status_code, 200)
create_page("page_3b", "nav_playground.html", "de", site=self.site3)
with self.settings(SITE_ID=self.site3.pk):
create_page("page_3a", "nav_playground.html", "nl", site=self.site3)
# with param
self.assertEqual(Page.objects.on_site(self.site2.pk).count(), 1)
self.assertEqual(Page.objects.on_site(self.site3.pk).count(), 2)
self.assertEqual(Page.objects.drafts().on_site().count(), 2)
with self.settings(SITE_ID=self.site2.pk):
# without param
self.assertEqual(Page.objects.drafts().on_site().count(), 1)
# The admin preview of a page on another site must redirect (302) to that
# site's domain with the edit-mode toolbar parameter attached.
def test_site_preview(self):
page = create_page("page", "nav_playground.html", "de", site=self.site2, published=True)
with self.login_user_context(self.get_superuser()):
response = self.client.get(admin_reverse('cms_page_preview_page', args=[page.pk, 'de']))
self.assertEqual(response.status_code, 302)
self.assertEqual(response._headers['location'][1], 'http://sample2.com/de/?%s&language=de' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
# Builds a small page tree on site2 and site3 and checks every published
# page is reachable (HTTP 200) under its own site.
def test_site_publish(self):
self._login_context.__exit__(None, None, None)
pages = {"2": list(range(0, 5)), "3": list(range(0, 5))}
lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
lang_settings[3][1]['public'] = True
with self.settings(CMS_LANGUAGES=lang_settings, LANGUAGE_CODE="de"):
with self.settings(SITE_ID=self.site2.pk):
pages["2"][0] = create_page("page_2", "nav_playground.html", "de",
site=self.site2)
pages["2"][0].publish('de')
pages["2"][1] = create_page("page_2_1", "nav_playground.html", "de",
parent=pages["2"][0], site=self.site2)
pages["2"][2] = create_page("page_2_2", "nav_playground.html", "de",
parent=pages["2"][0], site=self.site2)
pages["2"][3] = create_page("page_2_1_1", "nav_playground.html", "de",
parent=pages["2"][1], site=self.site2)
pages["2"][4] = create_page("page_2_1_2", "nav_playground.html", "de",
parent=pages["2"][1], site=self.site2)
for page in pages["2"]:
page.publish('de')
for page in pages["2"]:
if page.is_home:
page_url = "/de/"
else:
page_url = page.get_absolute_url(language='de')
response = self.client.get(page_url)
self.assertEqual(response.status_code, 200)
with self.settings(SITE_ID=self.site3.pk):
pages["3"][0] = create_page("page_3", "nav_playground.html", "de",
site=self.site3)
pages["3"][0].publish('de')
pages["3"][1] = create_page("page_3_1", "nav_playground.html", "de",
parent=pages["3"][0], site=self.site3)
pages["3"][2] = create_page("page_3_2", "nav_playground.html", "de",
parent=pages["3"][0], site=self.site3)
pages["3"][3] = create_page("page_3_1_1", "nav_playground.html", "de",
parent=pages["3"][1], site=self.site3)
pages["3"][4] = create_page("page_3_1_2", "nav_playground.html", "de",
parent=pages["3"][1], site=self.site3)
for page in pages["3"]:
page.publish('de')
for page in pages["3"]:
if page.is_home:
page_url = "/de/"
else:
page_url = page.get_absolute_url(language='de')
response = self.client.get(page_url)
self.assertEqual(response.status_code, 200)
# Deleting a site must cascade-delete the placeholders of its pages.
def test_site_delete(self):
with self.settings(SITE_ID=self.site2.pk):
create_page("page_2a", "nav_playground.html", "de", site=self.site2)
self.assertEqual(Placeholder.objects.count(), 2)
self.site2.delete()
self.assertEqual(Placeholder.objects.count(), 0)
|
[
"zf_sch@126.com"
] |
zf_sch@126.com
|
84cb818bf7c6bcc47943255f6207b78f2f3eea43
|
82d284f9f7d66baf75914614ac1bf56dc3516a9a
|
/SGA_SEVEN/SGA_SEVEN/wsgi.py
|
2b04a6dfb3e75d9e1d6dabff5ea2fe791ae2f36d
|
[] |
no_license
|
WanerAndresVM/SGAseven
|
ed4f11df01e037640c3cf5b0054800578b989909
|
53195177d9b88c282cdf8d2d2fa34f9912783718
|
refs/heads/develop
| 2023-08-01T06:39:23.292451
| 2020-05-13T21:54:46
| 2020-05-13T21:54:46
| 263,747,757
| 0
| 0
| null | 2021-09-22T19:01:14
| 2020-05-13T21:32:52
|
Python
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for SGA_SEVEN project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SGA_SEVEN.settings')
# Module-level WSGI callable imported by WSGI servers.
application = get_wsgi_application()
|
[
"valenciawaner@gmail.com"
] |
valenciawaner@gmail.com
|
68e2f46d01b89ea3bba1ff71f747a494048e4971
|
b63551b3ecbd9ba25285d1a059ca6010b94d8713
|
/mysql/_sqlalchemy.py
|
cdfef705af3611bb43059bb0eab8105498763d61
|
[] |
no_license
|
kinpoll/python-
|
9082749d21c40cc563dd13536e5aa79af62583d0
|
a3e0f3189caf29984d46e95bba8411f2a24fbbed
|
refs/heads/master
| 2020-04-04T21:58:50.330917
| 2018-11-06T01:10:18
| 2018-11-06T01:10:18
| 156,305,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base  # (factory for the ORM base class)
from sqlalchemy import Column, Integer, String
# ORM session support
from sqlalchemy.orm import sessionmaker
# Create the database engine (connection object)
# NOTE(review): credentials are hard-coded in the connection URL.
engine = create_engine('mysql+pymysql://root:123456@localhost/db5')
# Create the declarative ORM base class
Base = declarative_base()
# Create a session object bound to the engine
session = sessionmaker(engine)()
# ORM model class inheriting from the declarative Base
class User(Base):
    """ORM model mapped to the ``t123`` table."""
    __tablename__ = 't123'
    id = Column(Integer, primary_key=True)
    name = Column(String(20))
    phnumber = Column(String(11), unique=True)

    def add_data(self):
        """Insert a sample record and commit it via the module-level session."""
        p = User(id=1, name='Lucy', phnumber='13838383838')
        session.add(p)
        session.commit()

    def select_data(self):
        """Query the record with id=1 and print its id and name."""
        # BUG FIX: Query has no ``get_select`` method (the original raised
        # AttributeError at runtime); ``.all()`` is the SQLAlchemy call that
        # returns the matching rows as a list.
        result = session.query(User).filter_by(id=1).all()
        # ``result`` is a list of User instances.
        for r in result:
            print(r.id, r.name)
# Create all mapped tables on the engine (no-op for tables that already exist)
Base.metadata.create_all(engine)
if __name__ =='__main__':
s = User()
s.add_data()
s.select_data()
|
[
"tarena@tedu.cn"
] |
tarena@tedu.cn
|
3acc4bbf31aad07722de7810acf23fa4861eb307
|
5759f0579782a52b289fe62455b790d7ad91bdf8
|
/cracks_nms.py
|
19eaa72e393a8eebc000c92d755842f1148fb352
|
[] |
no_license
|
ShabeerMohammad/Deep-Custom
|
19f7acd75b10187ede45071f401c153ce6cc9102
|
9baef2e6b89d524e76093b29b6d31ea3fa250c00
|
refs/heads/main
| 2023-05-11T13:59:58.067584
| 2021-05-20T16:17:26
| 2021-05-20T16:17:26
| 368,842,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,437
|
py
|
from matplotlib import pyplot
from tensorflow.keras.preprocessing.image import load_img,img_to_array
#from mrcnn.visualize import display_instances
from matplotlib.patches import Rectangle
from mrcnn.model import MaskRCNN
import glob
import cv2
from mrcnn.config import Config
import numpy as np
from mrcnn.model import load_image_gt
from mrcnn.model import mold_image
from numpy import expand_dims
from mrcnn.utils import compute_ap
from numpy import mean
from display import display_instances,apply_mask
# define the prediction configuration
class PredictionConfig(Config):
"""Mask R-CNN inference configuration: one image per GPU, 2 classes."""
# define the name of the configuration
NAME = "cracks_cfg"
# number of classes (background + crack)
NUM_CLASSES = 1 + 1
# simplify GPU config
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# calculate the mAP for a model on a given dataset
def evaluate_model(dataset, model, cfg):
"""Return the mean average precision (mAP) of ``model`` over every image in ``dataset``."""
APs = list()
for image_id in dataset.image_ids:
# load image, bounding boxes and masks for the image id
image, image_meta, gt_class_id, gt_bbox, gt_mask = load_image_gt(dataset, cfg, image_id, use_mini_mask=False)
# convert pixel values (e.g. center)
scaled_image = mold_image(image, cfg)
# convert image into one sample
sample = expand_dims(scaled_image, 0)
# make prediction
yhat = model.detect(sample, verbose=0)
# extract results for first sample
r = yhat[0]
# calculate statistics, including AP
AP, _, _, _ = compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"], r["scores"], r['masks'])
# store
APs.append(AP)
# calculate the mean AP across all images
mAP = mean(APs)
return mAP
# create config
cfg = PredictionConfig()
# define the model
model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
# load model weights
model.load_weights('/home/smohammad/Projects/Custom-Cracks/Label_Crack+Nocrack/cracks_cfg20210427T1057/mask_rcnn_cracks_cfg_0003.h5', by_name=True)
# evaluate model on training dataset
# NOTE(review): train_set and test_set are never defined in this script --
# they must be created (presumably CracksDataset instances) before these
# calls can run.
train_mAP = evaluate_model(train_set, model, cfg)
print("Train mAP: %.3f" % train_mAP)
# evaluate model on test dataset
test_mAP = evaluate_model(test_set, model, cfg)
print("Test mAP: %.3f" % test_mAP)
# plot a number of photos with ground truth and predictions
def plot_actual_vs_predicted(dataset, model, cfg, n_images=5):
"""For the first ``n_images`` samples of ``dataset``, save an 'Actual'
figure (raw image) and a 'Predicted' figure (image plus the model's
detected boxes) under ./output/."""
# load image and mask
for i in range(n_images):
# load the image and mask
image = dataset.load_image(i)
#image1 = dataset.load_image(i)
mask, class_ids = dataset.load_mask(i)
# convert pixel values (e.g. center)
scaled_image = mold_image(image, cfg)
# convert image into one sample
sample = expand_dims(scaled_image, 0)
# make prediction
yhat = model.detect(sample, verbose=0)[0]
# define subplot
pyplot.figure(figsize=(39,29))
#pyplot.subplot(n_images, 2, i*2+1)
# plot raw pixel data
pyplot.imshow(image)
pyplot.title('Actual')
figure = pyplot.gcf()
figure.set_size_inches(12, 15)
pyplot.savefig('./output/actual{}.jpg'.format(i),dpi=100)
# plot masks
# NOTE(review): these mask overlays are drawn after the 'Actual' figure was
# already saved, and the 'Predicted' imshow below draws the image on top of
# them -- confirm whether the masks were intended to appear in either file.
for j in range(mask.shape[2]):
pyplot.imshow(mask[:, :, j], cmap='gray', alpha=0.5,aspect='auto')
# get the context for drawing boxes
#pyplot.figure(figsize=(39,29))
#pyplot.subplot(n_images, 2, i*2+2)
# plot raw pixel data
pyplot.imshow(image)
pyplot.title('Predicted')
ax = pyplot.gca()
# plot each box
for box in yhat['rois']:
# get coordinates
y1, x1, y2, x2 = box
# calculate width and height of the box
width, height = x2 - x1, y2 - y1
# create the shape
rect = Rectangle((x1, y1), width, height, fill=False, color='blue')
# draw the box
ax.add_patch(rect)
# show the figure
#figure = pyplot.gcf()
#figure.set_size_inches(50, 40)
#pyplot.savefig('./output/predicted{}.jpg'.format(i),dpi=100)
#con = concatenate((image,image1),axis=1)
#cv2.imwrite('/home/smohammad/Projects/Custom-Cracks/Label_Crack+Nocrack/output/crack{}'.format(i),con)
#pyplot.savefig('./output/predicted{}.jpg'.format(i),dpi=100)
figure = pyplot.gcf()
figure.set_size_inches(12, 15)
pyplot.savefig('./output/predicted{}.jpg'.format(i),dpi=100)
#pyplot.show()
# plot predictions for train dataset
plot_actual_vs_predicted(train_set, model, cfg)
# plot predictions for test dataset
plot_actual_vs_predicted(test_set, model, cfg)
# NOTE(review): CracksDataset is not defined/imported in this file.
new_set = CracksDataset()
new_set.load_dataset('input')
new_set.prepare()
print('Test: %d' % len(test_set.image_ids))
# NOTE(review): plot_actual_vs_predicted expects a dataset object as its
# first argument; passing the filename '300.jpeg' will fail at
# dataset.load_image().
plot_actual_vs_predicted('300.jpeg', model, cfg)
# NOTE(review): this redefines the PredictionConfig class declared earlier
# in the file with identical contents -- one of the two definitions is
# redundant.
class PredictionConfig(Config):
# define the name of the configuration
NAME = "cracks_cfg"
# number of classes (background + crack)
NUM_CLASSES = 1 + 1
# simplify GPU config
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# create config
cfg = PredictionConfig()
# define the model
model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
# load model weights
model.load_weights('mask_rcnn_cracks_cfg_0003.h5', by_name=True)
# NOTE(review): this img is overwritten below; the per-file loop further
# down reloads each image itself, so these two lines appear unused.
img = load_img('/home/smohammad/Projects/Custom-Cracks/Label_Crack+Nocrack/input/images/301.jpeg')
img = img_to_array(img)
def draw_image_with_boxes(filename, boxes_list):
    """Display the image at ``filename`` with each (y1, x1, y2, x2) box outlined in blue."""
    data = pyplot.imread(filename)
    pyplot.figure(figsize=(15, 20))
    pyplot.imshow(data)
    axes = pyplot.gca()
    for y1, x1, y2, x2 in boxes_list:
        outline = Rectangle((x1, y1), x2 - x1, y2 - y1,
                            fill=False, color='blue', lw=2)
        axes.add_patch(outline)
    pyplot.show()
# Class labels indexed by class id (0 = background).
class_names = ['BG','Crack']
path = '/home/smohammad/Projects/Custom-Cracks/Label_Crack+Nocrack/input/images/*.*'
# Run detection on every input image and visualize boxes and instances.
for num,InputImage in enumerate(glob.glob(path)):
img = load_img(InputImage)
img1 = img_to_array(img)
# cv2.imwrite('/home/smohammad/Projects/Custom-Cracks/Label_Crack+Nocrack/output/actual{}.jpg'.format(num),img1)
results = model.detect([img1],verbose=0)
draw_image_with_boxes(InputImage,results[0]['rois'])
r = results[0]
display_instances(img1,r['rois'],r['masks'],r['class_ids'],class_names,r['scores'])
# cv2.imwrite('/home/smohammad/Projects/Custom-Cracks/Label_Crack+Nocrack/output/predicted{}.jpg'.format(num),img1)
def box_iou(boxes1, boxes2):
    """Compute IOU between two sets of boxes of shape (N,4) and (M,4).

    Boxes are (y1, x1, y2, x2); returns an (N, M) array of pairwise IOUs.
    """
    box_area = lambda boxes: ((boxes[:, 2] - boxes[:, 0]) *
                              (boxes[:, 3] - boxes[:, 1]))
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
    # Clamp at 0 so disjoint boxes contribute zero intersection.
    wh = (rb - lt).clip(min=0)  # [N,M,2]
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
    union = area1[:, None] + area2 - inter
    return inter / union


def nms(boxes, scores, iou_threshold):
    """Greedy non-maximum suppression.

    Parameters
    ----------
    boxes : (N, 4) array of (y1, x1, y2, x2) coordinates.
    scores : (N,) confidence score per box.
    iou_threshold : boxes overlapping an already-kept box by more than this
        IOU are suppressed.

    Returns
    -------
    (K,) int32 array of indices of the kept boxes, in descending score order.
    """
    # sorting scores by the descending order and return their indices
    B = scores.argsort()[::-1]
    keep = []  # boxes indices that will be kept
    while B.size > 0:
        i = B[0]
        keep.append(i)
        if B.size == 1:
            break
        iou = box_iou(boxes[i, :].reshape(-1, 4),
                      boxes[B[1:], :].reshape(-1, 4)).reshape(-1)
        inds = np.nonzero(iou <= iou_threshold)[0]
        B = B[inds + 1]
    # BUG FIX: np.array() takes no ``ctx`` argument (that is MXNet's NDArray
    # API); the original ``np.array(keep, dtype=np.int32, ctx=boxes.ctx)``
    # raised TypeError on every call.
    return np.array(keep, dtype=np.int32)
|
[
"noreply@github.com"
] |
ShabeerMohammad.noreply@github.com
|
51c3379cd586914315f9820e95e92bc7f4309e0f
|
69c8862c03eadc2132cf6e841da73fe397a53672
|
/utilizadores/migrations/0001_initial.py
|
2dfdab42387a84f8f340a667d71509fbc137d65d
|
[] |
no_license
|
svigor/ULTIMATE_LES
|
9eddbc7cf50e41625a98cd0bd0e893e9eae40e19
|
5f5b7f56d8ca88d5d5122bcb91dc95a466de8d01
|
refs/heads/master
| 2023-06-01T00:31:46.738186
| 2023-05-28T20:46:19
| 2023-05-28T20:46:19
| 351,144,973
| 0
| 0
| null | 2022-03-15T17:58:32
| 2021-03-24T16:12:45
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
# Generated by Django 3.2 on 2021-04-21 15:07
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
"""Initial schema: Utilizador (multi-table extension of auth.User) plus the
Administrador, Participante and Proponente subtypes, each linked to its
parent through an auto-created parent_link OneToOneField."""
# NOTE: auto-generated by Django (see header); avoid hand-editing applied
# migrations.
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Utilizador',
fields=[
('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='auth.user')),
('contacto', models.CharField(max_length=20)),
('valido', models.CharField(max_length=255)),
],
options={
'db_table': 'Utilizador',
},
bases=('auth.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Administrador',
fields=[
('utilizador_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='utilizadores.utilizador')),
('gabinete', models.CharField(max_length=255)),
],
options={
'db_table': 'Administrador',
},
bases=('utilizadores.utilizador',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Participante',
fields=[
('utilizador_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='utilizadores.utilizador')),
],
options={
'db_table': 'Participante',
},
bases=('utilizadores.utilizador',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Proponente',
fields=[
('utilizador_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='utilizadores.utilizador')),
],
options={
'db_table': 'Proponente',
},
bases=('utilizadores.utilizador',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
[
"variormerfi@gmail.com.com"
] |
variormerfi@gmail.com.com
|
3575325c24967392841f9f453c832b41d751575f
|
a268cd67916fbf33ec0c18fce9ba883c3aae005c
|
/iputils/iputils.py
|
88c4ec63eb178fdb2a4d732f7d270667d3269cab
|
[] |
no_license
|
adamacosta/OMSCS-6250-Examples
|
152ea13ab53ac94ed5b0f886b56e143067e86416
|
179801320162c7135c9e47ba3e7188b398175156
|
refs/heads/master
| 2021-01-01T20:48:07.267554
| 2015-06-09T14:45:55
| 2015-06-09T14:45:55
| 37,002,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,778
|
py
|
import re
from numpy import binary_repr
from bitarray import bitarray
def _gen_octet(bits):
exec('b = ' + '0b' + bits)
return str(b)
def _bits_to_string(bits):
"""Render a bitarray as a plain '0'/'1' string."""
return bits.to01()
def _bits_to_dotted_quad(bits):
"""Convert a 32-bit bitarray to dotted-quad notation (e.g. '192.168.0.1')."""
return '.'.join(list(map(_gen_octet, [bits[:8].to01(),
bits[8:16].to01(),
bits[16:24].to01(),
bits[24:32].to01()])))
def _dotted_quad_to_bits(quad):
"""Convert a dotted-quad string into a 32-bit bitarray (8 bits per octet)."""
return bitarray(''.join(list(map(binary_repr,
list(map(int, quad.split('.'))), [8] * 4))))
def _is_valid_ip(addr):
if not re.match('([0-9]{1,3}\.){3}[0-9]{1,3}', addr):
return False
if sum([int(i)<0 or int(i)>255 for i in addr.split('.')]):
return False
return True
def _is_valid_cidr(addr):
if not re.match('([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{2}', addr):
return False
if not _is_valid_ip(addr.split('/')[0]):
return False
if int(addr.split('/')[1])<0 or int(addr.split('/')[1])>32:
return False
return True
class IP:
"""Return an IP object.
Attributes
----------
bits -- A 32-bit bitarray formed by concatenating the binary
representations of each octet of the IP address.
Methods
-------
getbits() -> bitarray
tostring() -> string
tobitstring() -> string
lpm() -> int
"""
def __init__(self, addr):
"""Initialize an IP object.
The argument to IP must be a string in dotted quad format
consisting of four integers in the range 0 to 255,
separated by a single dot.
"""
if not _is_valid_ip(addr):
raise TypeError('Not a valid IP')
self.bits = _dotted_quad_to_bits(addr)
def getbits(self):
"""Return the 32-bit bitarray representing the IP address."""
return self.bits
def tostring(self):
"""Return a string representation in dotted quad format."""
return _bits_to_dotted_quad(self.bits)
def tobitstring(self):
"""Return a 32-bit string showing hardware layout of address."""
return _bits_to_string(self.bits)
def lpm(self, other):
"""Return the longest prefix match with a CIDR block."""
# XOR highlights the first differing bit: if it lies at or past the
# mask length, the whole prefix matches.
# NOTE(review): when self.bits equals other.first exactly, the XOR has
# no True bit and bitarray.index(True) raises ValueError -- confirm
# whether an exact match should instead return other._mask.
assert(isinstance(other, CIDRBlock))
if (self.bits ^ other.first).index(True) >= other._mask:
return other._mask
else:
return 0
def __eq__(self, other):
# NOTE(review): __eq__ without __hash__ makes IP unhashable-by-default
# semantics inconsistent (Python keeps the default hash here).
return self.getbits() == other.getbits()
class CIDRBlock:
"""Return a CIDRBlock object.
Attributes
----------
first -- First valid IP address in the range.
last -- Last valid IP address in the range.
bitmask -- Bit representation of subnet mask
Methods
-------
tostring() -> string
getrange() -> (string, string)
getmask() -> string
matches() -> boolean
"""
def __init__(self, addr):
"""Initialize a CIDRBlock object.
The argument to CIDRBlock must be a string in dotted quad format
consisting of four integers in the range 0 to 255, each separated
by a single dot, with a trailing slash and subnet mask length that
must be in the range 0 to 32.
It is possible to pass an invalid CIDR block, e.g. a block with 1
bits set past the length of the subnet mask, and the constructor
will return a valid CIDR block by zeroing all invalid 1 bits.
"""
# NOTE(review): _is_valid_cidr's regex requires exactly two mask
# digits, so masks /0 through /9 are rejected here despite the
# docstring allowing 0..32.
if not _is_valid_cidr(addr):
raise TypeError('Not a valid CIDR')
self.first = _dotted_quad_to_bits(addr.split('/')[0])
self._mask = int(addr.split('/')[1])
self.bitmask = bitarray('1'*self._mask + '0'*(32 - self._mask))
# Zero host bits for the network address; set them for the broadcast.
self.first &= self.bitmask
self.last = self.first | ~self.bitmask
def tostring(self):
"""Return string representation in CIDR format."""
return _bits_to_dotted_quad(self.first) + '/' + str(self._mask)
def getrange(self):
"""Return the first and last address in the block."""
return _bits_to_dotted_quad(self.first), _bits_to_dotted_quad(self.last)
def getmask(self):
"""Return the subnet mask associated with this CIDR block."""
return _bits_to_dotted_quad(self.bitmask)
def matches(self, other):
"""Return True if IP address is in block."""
assert(isinstance(other, IP))
return self.first[:self._mask] == other.getbits()[:self._mask]
def __eq__(self, other):
return self.tostring() == other.tostring()
class trie:
# Placeholder for a prefix trie (e.g. for longest-prefix-match routing tables).
# TODO: implement a trie
pass
def ip_parser(addr):
    """Try to parse a string in dotted quad format.

    Arguments
    ---------
    addr -- A string representing an IP address.

    Returns
    -------
    IP -- An IP object is returned if the input string has no trailing
    slash and mask.
    CIDRBlock -- A CIDRBlock object is returned if the input string does
    have a trailing slash and mask.

    Raises
    ------
    TypeError if string is not parsable to either format.
    """
    # BUG FIX: the CIDR pattern required exactly two mask digits, so valid
    # blocks such as '10.0.0.0/8' were never routed to CIDRBlock (they fell
    # through to IP() and failed).  Accept one- or two-digit masks; the
    # constructors perform full validation.
    if re.match(r'([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}', addr):
        return CIDRBlock(addr)
    if re.match(r'([0-9]{1,3}\.){3}[0-9]{1,3}', addr):
        return IP(addr)
    raise TypeError('Not a proper IP address')
|
[
"acosta.adam@gmail.com"
] |
acosta.adam@gmail.com
|
3fdcc5ff4ac4f4111bcb8147b7b2b25b259e93a9
|
00fa2a8f248870316b1f582ea2baebc8e86f6363
|
/py/prospect/mycoaddcam.py
|
f6803d7b2b13beb89950bb6cb04d71946b41b775
|
[] |
no_license
|
moustakas/prospect
|
d9521aaf279707707553338ab659e847dbf470aa
|
cd445bcf3912d834f17587eff2c28cde6b7fe7a8
|
refs/heads/master
| 2020-12-29T05:45:45.426590
| 2020-02-06T15:34:12
| 2020-02-06T15:34:12
| 238,477,786
| 0
| 0
| null | 2020-02-05T15:05:01
| 2020-02-05T15:05:00
| null |
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
# EA - Oct 2019 (Temporary / preliminary)
# desispec.coaddition.coadd_cameras() unsatisfying at least since
# 1) don't want to coadd over exposures / 2) cannot assume waves are aligned over arms (r/z mismatch seen in datachallenge)
import numpy as np
from desispec.interpolation import resample_flux
def mycoaddcam(spectra):
    """Merge b/r/z camera spectra into a single (wave, flux, ivar) set.

    Takes into account noise and mismatched wavelengths over the 3 arms.
    Currently assumes bands 'b', 'r', 'z' and two overlap regions (b-r, r-z).

    Arguments
    ---------
    spectra -- desispec Spectra-like object exposing .wave/.flux/.ivar dicts
        keyed by band and a num_spectra() method (assumed; confirm caller).

    Returns
    -------
    (wave, flux, ivar) -- merged wavelength grid, plus per-spectrum flux and
        inverse variance resampled/combined onto that grid.
    """
    assert np.all([band in spectra.wave.keys() for band in ['b', 'r', 'z']])

    # Build the merged wavelength grid: b-band up to `margin` below its red
    # edge, then the parts of r and z that do not overlap the previous band.
    margin = 20  # Angstrom. Avoids using edge-of-band at overlap regions
    wave = spectra.wave['b'].copy()
    wave = wave[(wave < np.max(wave) - margin)]
    tolerance = 0.0001
    length_bands = {'b': wave.size}
    w_bands = {'b': np.arange(wave.size)}
    for band in ['r', 'z']:
        if band == 'z':
            w_bands[band], = np.where(spectra.wave[band] > wave[-1] + tolerance)
        else:
            w_bands[band], = np.where((spectra.wave[band] > wave[-1] + tolerance)
                                      & (spectra.wave[band] < np.max(spectra.wave[band]) - margin))
        wave = np.append(wave, spectra.wave[band][w_bands[band]])
        length_bands[band] = w_bands[band].size

    nwave = wave.size
    nspec = spectra.num_spectra()
    flux = np.zeros((nspec, nwave), dtype=spectra.flux['b'].dtype)
    ivar = np.zeros((nspec, nwave), dtype=spectra.ivar['b'].dtype)

    # Copy flux/ivar directly over the non-overlapping wavelength ranges.
    i = 0
    for band in ['b', 'r', 'z']:
        flux[:, i:i + length_bands[band]] = spectra.flux[band][:, w_bands[band]]
        ivar[:, i:i + length_bands[band]] = spectra.ivar[band][:, w_bands[band]]
        i += length_bands[band]

    # Overlap regions: resample both arms onto the merged grid and combine
    # with inverse-variance weighting (plain average where total ivar == 0).
    overlaps = ['br', 'rz']
    for the_overlap in overlaps:
        b1, b2 = the_overlap[0], the_overlap[1]
        w_overlap, = np.where((wave > spectra.wave[b2][0]) & (wave < spectra.wave[b1][-1]))
        assert (w_overlap.size > 0)
        lambd_over = wave[w_overlap]
        for ispec in range(nspec):
            phi1, ivar1 = resample_flux(lambd_over, spectra.wave[b1], spectra.flux[b1][ispec, :], ivar=spectra.ivar[b1][ispec, :])
            phi2, ivar2 = resample_flux(lambd_over, spectra.wave[b2], spectra.flux[b2][ispec, :], ivar=spectra.ivar[b2][ispec, :])
            total_ivar = ivar1 + ivar2
            ivar[ispec, w_overlap] = total_ivar
            # Bug fix: the original wrote the weighted flux via chained
            # advanced indexing (flux[ispec, w_overlap][w_ok] = ...), which
            # assigns into a temporary copy -- the weighted values were
            # silently discarded.  Build the combined array first, then
            # assign once.
            combined = (phi1 + phi2) / 2
            w_ok = np.where(total_ivar > 0)
            combined[w_ok] = (ivar1[w_ok] * phi1[w_ok] + ivar2[w_ok] * phi2[w_ok]) / total_ivar[w_ok]
            flux[ispec, w_overlap] = combined
    return (wave, flux, ivar)
|
[
"eric.armengaud@cea.fr"
] |
eric.armengaud@cea.fr
|
1ac8b41806a067abca1fdc45593dc680c0a3e844
|
ad0a9af0e6e17db1a1ab1ce001e7fd310a41ff91
|
/smtp_proj/manage.py
|
51e717400e4637b45d1ccf203fe5d427dc77045c
|
[] |
no_license
|
matthewgstillman/nba_api
|
c73e6f4b086ca9ceaf2009a691472e8356899c5c
|
4f840bd0002f201e8696d2a6d82a2fefe1170f7a
|
refs/heads/master
| 2022-02-21T01:27:21.512006
| 2019-08-16T02:50:51
| 2019-08-16T02:50:51
| 198,093,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for the smtp_proj project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "smtp_proj.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imports fine, so re-raise the original failure.
        raise
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
|
[
"matthewgstillman@gmail.com"
] |
matthewgstillman@gmail.com
|
90d4943fbffe4b239c2566d420ef7fd466efed78
|
91f4078045a57eaaafe0b172909d7041e829941c
|
/arjuna/interact/gui/dispatcher/driver/driver_commands.py
|
b53c495a8f2550697d250ee1544d207518bd6a0b
|
[
"Apache-2.0"
] |
permissive
|
amiablea2/arjuna
|
0d06d1dfb34309f4b6f39b17298f7acb6c3c48c9
|
af74e0882216881ceca0a10f26442165ffc43287
|
refs/heads/master
| 2023-08-21T20:04:30.416303
| 2021-10-27T06:41:40
| 2021-10-27T06:41:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,022
|
py
|
# This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.webdriver.common.action_chains import ActionChains
class DriverCommands:
    """Stateless classmethod wrappers over Selenium WebDriver operations.

    Every method takes the driver (and any extra inputs) explicitly, so
    the class itself carries no state.
    """

    @classmethod
    def go_to_url(cls, driver, url):
        """Navigate the browser to `url`."""
        driver.get(url)

    @classmethod
    def refresh_browser(cls, driver):
        """Reload the current page."""
        driver.refresh()

    @classmethod
    def go_back_in_browser(cls, driver):
        """Browser history: go back one page."""
        driver.back()

    @classmethod
    def go_forward_in_browser(cls, driver):
        """Browser history: go forward one page."""
        driver.forward()

    @classmethod
    def quit(cls, driver):
        """Quit the browser and end the WebDriver session."""
        driver.quit()

    @classmethod
    def get_page_title(cls, driver):
        """Return the current page title."""
        return driver.title

    @classmethod
    def get_url(cls, driver):
        """Return the current page URL.

        Bug fix: previously this returned the driver object itself rather
        than the URL string.
        """
        return driver.current_url

    @classmethod
    def get_source(cls, driver):
        """Return the page's HTML source."""
        return driver.page_source

    @classmethod
    def send_keys(cls, driver, key_str):
        """Type `key_str` into the currently focused element."""
        ActionChains(driver).send_keys(key_str).perform()

    @classmethod
    def is_web_alert_present(cls, driver):
        """Return True if a JavaScript alert appears within ~1 second."""
        from selenium.webdriver.support.ui import WebDriverWait
        from selenium.webdriver.support import expected_conditions as EC
        try:
            WebDriverWait(driver, 1).until(EC.alert_is_present(), 'Timed out.')
            return True
        except Exception as e:
            return False

    @classmethod
    def confirm_web_alert(cls, driver):
        """Accept (OK) the active alert."""
        driver.switch_to.alert.accept()

    @classmethod
    def dismiss_web_alert(cls, driver):
        """Dismiss (Cancel) the active alert."""
        driver.switch_to.alert.dismiss()

    @classmethod
    def send_text_to_web_alert(cls, driver, text):
        """Type `text` into the active alert's prompt field."""
        driver.switch_to.alert.send_keys(text)

    @classmethod
    def get_text_from_web_alert(cls, driver):
        """Return the active alert's message text."""
        return driver.switch_to.alert.text

    @classmethod
    def focus_on_frame(cls, driver, element):
        """Switch the WebDriver context into the given frame element."""
        driver.switch_to.frame(element)

    @classmethod
    def focus_on_dom_root(cls, driver):
        """Switch the context back to the top-level document."""
        return driver.switch_to.default_content()

    @classmethod
    def focus_on_parent_frame(cls, driver):
        """Switch the context to the parent of the current frame."""
        driver.switch_to.parent_frame()

    @classmethod
    def execute_javascript(cls, driver, script, *args):
        """Execute `script` in the page with `args` and return its result."""
        from arjuna import log_debug
        log_debug("Executing JavaScript {} with args {}.".format(script, args))
        return driver.execute_script(script, *args)

    @classmethod
    def take_screenshot(cls, driver, file_path):
        """Save a screenshot of the current window to `file_path`."""
        return driver.save_screenshot(file_path)

    @classmethod
    def take_screenshot_as_base64(cls, driver):
        """Return a screenshot of the current window as a base64 string."""
        return driver.get_screenshot_as_base64()

    @classmethod
    def set_window_size(cls, driver, width, height):
        """Resize the browser window to width x height pixels."""
        driver.set_window_size(width, height)

    @classmethod
    def maximize_window(cls, driver):
        """Maximize the browser window."""
        driver.maximize_window()

    @classmethod
    def get_current_window_handle(cls, driver):
        """Return the handle of the currently focused window."""
        return driver.current_window_handle

    @classmethod
    def focus_on_window(cls, driver, window_handle):
        """Switch focus to the window identified by `window_handle`."""
        driver.switch_to.window(window_handle)

    @classmethod
    def close_current_window(cls, driver):
        """Close the currently focused window (session stays alive)."""
        driver.close()

    @classmethod
    def get_window_title(cls, driver):
        """Return the focused window's title (same as get_page_title)."""
        return driver.title

    @classmethod
    def get_current_window_size(cls, driver):
        """Return the focused window's size."""
        return driver.get_window_size()

    @classmethod
    def get_all_winodw_handles(cls, driver):
        """Return handles of all open windows.

        NOTE: the historical typo ('winodw') is kept because callers
        depend on this name.
        """
        return driver.window_handles

    @classmethod
    def replace_with_element(cls, setu_driver, value_tuple):
        """Resolve a (value, is_element_id) pair.

        When the flag is True, look the element up by its setu id on
        `setu_driver`; otherwise return the raw value unchanged.
        """
        if value_tuple[1] == True:
            return setu_driver.get_element_for_setu_id(value_tuple[0])
        else:
            return value_tuple[0]

    @classmethod
    def perform_action_chain(cls, setu_driver, driver, action_chain):
        """Build and perform an ActionChains sequence.

        `action_chain` is an iterable of (method_name, kwargs) pairs;
        kwarg values may be (setu_id, True) tuples, which are resolved to
        live elements before the call.
        """
        chain = ActionChains(driver)
        for action in action_chain:
            kwargs = {k: cls.replace_with_element(setu_driver, v) for k, v in action[1].items()}
            getattr(chain, action[0])(**kwargs)
        chain.perform()

    @classmethod
    def hover_on_element(cls, driver, webelement):
        """Move the mouse pointer over `webelement`."""
        ActionChains(driver).move_to_element(webelement).perform()

    @classmethod
    def mouse_click_on_element(cls, driver, webelement):
        """Mouse-click `webelement` via an action chain."""
        ActionChains(driver).click(webelement).perform()

    @classmethod
    def double_click_on_element(cls, driver, webelement):
        """Double-click `webelement` via an action chain."""
        ActionChains(driver).double_click(webelement).perform()

    @classmethod
    def scroll_to_element(cls, driver, webelement):
        """Scroll the page until `webelement` is in view."""
        cls.execute_javascript(driver, "arguments[0].scrollIntoView(true);", webelement)
|
[
"rahulverma81@gmail.com"
] |
rahulverma81@gmail.com
|
1a2cbca2b47e204576ed75b0558589bb3685657d
|
79fd905786eeb0ab21ca1945a3305a18a2d5176a
|
/Restart.py
|
9b38684ae7d162ce5869a42e76efaf38c89818f4
|
[] |
no_license
|
sushil1024/Computer-Troubleshoot-and-Intelligent-System
|
8e5f6608513358305d87f2a67cc276d39643dabd
|
2c2cbcd05163dc4c0b2655b74b263acc53175705
|
refs/heads/main
| 2023-08-28T00:49:39.808386
| 2021-10-25T12:03:35
| 2021-10-25T12:03:35
| 384,628,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
#importing os which is used for interaction with the operating system
import os
# Confirm with the user before triggering a machine restart.
answer = input("Are you sure ? (yes / no): ")
if answer == 'no':
    print("Cancelled")
elif answer == 'yes':
    # Windows-only: schedule a restart after a 1-second delay.
    os.system("shutdown /r /t 1")
else:
    print("Enter valid choice!")
|
[
"noreply@github.com"
] |
sushil1024.noreply@github.com
|
172b8fbbdf7493697776b058232fbe4c91b4d476
|
06c07c7a791e3487ddc84b1ab4e14769c23c2460
|
/path_finder.py
|
ace25c5cfd6a7cae8a8e15471080c4540aaed294
|
[] |
no_license
|
jimstevens2001/AI-Pacman
|
a0eb0004ebda7b30c6a1a09e3afc2b97f9273afd
|
0e2b26b28b5ce0ed6845e6f12f2134e203d79763
|
refs/heads/master
| 2020-05-17T20:29:46.595727
| 2011-05-07T16:24:28
| 2011-05-07T16:24:28
| 1,675,391
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,631
|
py
|
from common import *
import random
class PF:
    """Tile-grid path finder for the Pacman game, built around A*.

    NOTE(review): this is Python 2 code (print statements, cmp-style
    list.sort) -- do not run under Python 3 without porting.
    """
    #------------------------------------------------------------------------------
    # __init__()
    #------------------------------------------------------------------------------
    def __init__(self, game):
        # Game object supplies the map checker and config (tile size).
        self.game = game
        # Private RNG so successor shuffling doesn't disturb global random state.
        self.random = random.Random()
    #------------------------------------------------------------------------------
    # astar()
    #------------------------------------------------------------------------------
    def astar(self, start, goal):
        "A* algorithm. Finds the optimal path from start to goal."
        unnormalized_goal = goal
        # The start position needs to be normalized to a tile. An agent may
        # be at any location in a tile, but we only care about which tile they
        # are currently in.
        tile_size = self.game.manager.config_options['tile_size']
        start_tile = self.game.checker.get_tile_coordinates(start)
        start = (start_tile[0] * tile_size, start_tile[1] * tile_size)
        goal_tile = self.game.checker.get_tile_coordinates(goal)
        goal = (goal_tile[0] * tile_size, goal_tile[1] * tile_size)
        # Stored on self so the heuristic h() can see the goal.
        self.goal = goal
        #start = ((start[0] / tile_size) * tile_size, (start[1] / tile_size) * tile_size)
        #goal = (( goal[0] / tile_size) * tile_size, ( goal[1] / tile_size) * tile_size)
        # The list of explored states
        closed = []
        # The priority queue for states we still need to look at
        # (each queue entry is a full path; the last element is the frontier).
        q = [[start]]
        # Look at each item in the priority queue and check to see if it is the
        # goal.
        while q != []:
            # Get the first item in our sorted queue
            p = q.pop(0)
            # This is the actual position
            x = p[-1] # the position that is closest to the goal
            if( x in closed ):
                # If x is in the closed list, then we have already checked it and
                # expanded it and we don't need to do it again.
                continue
            if( x == goal ):
                # If x is the goal, then we need to return the path from the
                # starting position to the goal
                return p
            # Since x was not the goal, we need to append it to the closed list
            # so that it doesn't get checked again.
            closed.append(x)
            # Enqueue all of the successors of x into the priority queue
            for y in self.successors(x):
                self.enqueue(q, p, y)
        # If the queue ever becomes empty and a path has not been returned,
        # then the algorithm could not find a valid path from the start
        # position to the goal.
        print "start: ", start
        print "goal: ", goal
        print "unnormalized goal: ", unnormalized_goal
        raise PacmanError('A* failed to find a path')
    #------------------------------------------------------------------------------
    # apply_move()
    #------------------------------------------------------------------------------
    def apply_move(self, pos, move):
        "Applies a move to a position, returning a new position."
        return (pos[0] + move[0], pos[1] + move[1])
    #------------------------------------------------------------------------------
    # successors()
    #------------------------------------------------------------------------------
    def successors(self, pos):
        "Expands a position into all of its valid next positions."
        retval = []
        # Each step has to be a tile
        step = self.game.manager.config_options['tile_size']
        # Possible next positions are up, down, left and right of the current
        # position.
        local_dir = [(0,-step), (0,step), (-step,0), (step,0)]
        # Look at all of the possible next positions and add them to the return
        # list if they are valid moves.
        # (Directions are popped in random order so ties are broken randomly.)
        while local_dir != []:
            next = local_dir.pop(self.random.randint(0, len(local_dir)-1))
            newpos = self.apply_move( pos, next )
            # Only use the new position if it is a valid move.
            if self.isvalid(newpos):
                retval.append(newpos)
        return retval
    #------------------------------------------------------------------------------
    # isvalid()
    #------------------------------------------------------------------------------
    def isvalid(self, pos):
        "Checks to see if the position is valid."
        # If a position is not a wall and it is not out of bounds, then it is
        # a valid next position.
        # TODO: This is a hack. We need to track down the A* error
        if self.game.checker.out_of_bounds(pos):
            #if self.game.checker.out_of_bounds2(pos):
            return False
        elif self.game.checker.is_wall(pos):
            return False
        else:
            return True
    #------------------------------------------------------------------------------
    # enqueue()
    #------------------------------------------------------------------------------
    def enqueue(self, q, p , y):
        "Enqueues a path p with next position y into priority queue q."
        # NOTE(review): re-sorting the whole queue on every insert is
        # O(n log n) per enqueue; a heapq would be cheaper. Left as-is.
        q.append(p + [y])
        q.sort(self.path_cmp)
    #------------------------------------------------------------------------------
    # path_cmp()
    #------------------------------------------------------------------------------
    def path_cmp(self, x, y):
        "Compares the value of the f function of two paths. Used to sort a \
        list of paths."
        if self.f(x) > self.f(y):
            return 1
        elif self.f(x) == self.f(y):
            return 0
        else:
            return -1
    #------------------------------------------------------------------------------
    # f()
    #------------------------------------------------------------------------------
    def f(self, p):
        "Returns the value of the A* heuristic of the heuristic, h, plus the \
        depth len(p)."
        return len(p) - 1 + self.h(p[-1])
    #------------------------------------------------------------------------------
    # h()
    #------------------------------------------------------------------------------
    def h(self, s):
        "Returns the value of the heuristic function. This is the Manhattan \
        distance."
        # Uses self.goal, which astar() sets before searching.
        return abs(self.goal[0] - s[0]) + abs(self.goal[1] - s[1])
|
[
"jims@cs.umd.edu"
] |
jims@cs.umd.edu
|
415e976a4adc1c0c915435715d74dd585e196784
|
161a765674b837cf4a2f48879a987dce3f40d15f
|
/api_dev/blog/migrations/0005_auto_20210613_0419.py
|
688daf888b9c485f7616526297a1b43520fd6e65
|
[
"MIT"
] |
permissive
|
ManhTai01/api
|
19662cae94f9182dabb183a37bf7715be04254d9
|
09d7a8dd0a11c1552c34cd9006c5afae94897afc
|
refs/heads/master
| 2023-05-23T17:09:36.339576
| 2021-06-14T12:19:13
| 2021-06-14T12:19:13
| 376,811,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
# Generated by Django 3.1.12 on 2021-06-13 04:19
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: refresh hard-coded defaults on blog timestamps.

    NOTE(review): the defaults are pinned to the moment the migration was
    generated; `auto_now`/`auto_now_add` is the usual intent for
    create/update timestamps -- confirm against the model definition.
    """

    dependencies = [
        ('blog', '0004_auto_20210613_0419'),
    ]

    operations = [
        migrations.AlterField(
            model_name='blog',
            name='create_at',
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 13, 4, 19, 36, 638507, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='blog',
            name='update_at',
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 13, 4, 19, 36, 638532, tzinfo=utc)),
        ),
    ]
|
[
"lemanhtaictbp@gmail.com"
] |
lemanhtaictbp@gmail.com
|
3d6b83db1676023fa24d0d85d6002eab0aaa8483
|
d7214b7c62d32c22d18023a827ef35fe483163ad
|
/Archive/tdsr_dec.py
|
9d73c7a066c2daf5d39adfe0b5c98cef553235de
|
[] |
no_license
|
shawn-y-sun/ReportingAnalytics
|
85058608126486b425161efd8edff0e7ba84b609
|
10504711a5c512fce81f061c43ce8c6edc672721
|
refs/heads/main
| 2023-07-13T06:32:57.793954
| 2021-08-27T18:21:00
| 2021-08-27T18:21:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,101
|
py
|
from Reporting import load_data, Write, st_tdsr, template_tdsr_dec
import time
import datetime
# Input workbook with the TDSR-A decline records (hard-coded local path).
file_path = r'C:\Users\sunsh\Documents\TDSR A Decline\TDSRA_over65_makingitthrough_noTDSRrule.xlsx'

if __name__ == "__main__":
    # Time the full report-generation run.
    start = time.time()

    # Load the source workbook and render the decline report.
    # (A large block of commented-out experiment code was removed here;
    # see version control history if the old probes are needed.)
    df = load_data(file_path)
    report = Write(template_tdsr_dec, df)
    report.write()

    # Report wall-clock runtime; [:-4] trims microseconds to 2 digits.
    end = time.time()
    time_secs = end - start
    time_str = str(datetime.timedelta(seconds=time_secs))[:-4]
    print(f'[Runtime of the program is {time_str}]')
|
[
"noreply@github.com"
] |
shawn-y-sun.noreply@github.com
|
380081a06fd4a15bd7a9daedd5e5203a518b85a4
|
bf226f09fbcdb9be737f85cee6609afb9e896b72
|
/server/models.py
|
fa1d14930d731e599302e6ed2c8787fd44b6a257
|
[] |
no_license
|
AymirAydinli/AirlineFlask
|
bd8c0c975cd74e8af6ebe8d5f72d3c415ddeee8e
|
c6d4a47499cebf2e926c0a3012bb11b4b996cc6f
|
refs/heads/master
| 2020-12-27T05:48:12.630606
| 2020-02-27T17:22:02
| 2020-02-27T17:22:02
| 237,784,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,257
|
py
|
from server import db, login_manager
from datetime import datetime
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: look a User up by primary key."""
    pk = int(user_id)
    return User.query.get(pk)
class User(db.Model, UserMixin):
    """Registered passenger account; UserMixin supplies Flask-Login hooks."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    first_name = db.Column(db.String(20), nullable=False)
    second_name = db.Column(db.String(20))
    gender = db.Column(db.String(10))
    # Length 60 suggests a bcrypt hash is stored here -- confirm at call sites.
    password = db.Column(db.String(60), nullable=False)
    dateOfBirth = db.Column(db.Date)
    email = db.Column(db.String(20), unique=True, nullable=False)
    document_number = db.Column(db.String(20), unique=True, nullable=False)
    # One-to-many links to the user's addresses, documents, and bookings.
    address = db.relationship('Psngr_Adrs_Dtls', backref='adrs', lazy=True)
    docs = db.relationship('Psngr_Doc_Dtls', backref='user', lazy=True)
    ticket_user = db.relationship('Flight_Details', backref='user1', lazy=True)

    def __repr__(self):
        # Debug-friendly representation (password intentionally omitted).
        return f"User('{self.first_name}','{self.second_name}','{self.email}','{self.gender}','{self.dateOfBirth}')"
class Psngr_Adrs_Dtls(db.Model):
    """Passenger address details; each row belongs to one User."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    phoneNumber = db.Column(db.Integer, nullable=False)
    country = db.Column(db.String(30), nullable=False)
    city = db.Column(db.String(30), nullable=False)
    street = db.Column(db.String(30), nullable=False)
    building = db.Column(db.String(10), nullable=False)
    apartment = db.Column(db.String(10), nullable=False)
    # NOTE: attribute name shadows the builtin zip() inside this class body.
    zip = db.Column(db.String(10), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        # Shows only location fields; phone/apartment are omitted from repr.
        return f"Psngr_Adrs_Dtls('{self.country}','{self.city}','{self.street}','{self.building}','{self.zip}')"
class Psngr_Doc_Dtls(db.Model):
    """Passenger travel-document details (passport/ID), owned by one User."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    document_type = db.Column(db.String(10), nullable=False)
    nationality = db.Column(db.String(30), nullable=False)
    # Document expiry date.
    dateOfDocExp = db.Column(db.Date, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        return f"Psngr_Doc_Dtls('{self.document_type}','{self.nationality}','{self.dateOfDocExp}')"
class Flight_Details(db.Model):
    """A booking: links a User to an Airport pair with dates and fare."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    departure_date = db.Column(db.Date)
    arrival_date = db.Column(db.Date)
    passenger_count = db.Column(db.Integer, nullable=False)
    fare = db.Column(db.Integer, nullable=True)
    airport_id = db.Column(db.Integer, db.ForeignKey('airport.id'), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        return f"Flight_Details('{self.departure_date}','{self.arrival_date}', '{self.fare}', '{self.user_id}', '{self.airport_id}')"
class Airport(db.Model):
    """A departure/arrival airport pairing referenced by Flight_Details."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # NOTE(review): a String(20) column defaulting to datetime.utcnow()
    # (evaluated once at import time) looks like a copy/paste mistake --
    # confirm the intended default before changing the schema.
    departure = db.Column(db.String(20), nullable=False, default=datetime.utcnow())
    arrival = db.Column(db.String(20), nullable=False)
    flight_det = db.relationship('Flight_Details', backref='user2', lazy=True)

    def __repr__(self):
        # Bug fix: the old repr referenced self.Airport_name, an attribute
        # that does not exist on this model, so repr() raised AttributeError.
        return f"Airport('{self.departure}','{self.arrival}')"
|
[
"Ay@Aydinlis-MacBook-Pro.local"
] |
Ay@Aydinlis-MacBook-Pro.local
|
aa8c44690bf5d7007a049f7c7dc999404472d4db
|
61bc53ec90d92aece91753ec5ec9d25e0879a1e2
|
/content/vqa-maskrcnn-benchmark/tests/test_data_samplers.py
|
54e550dc141d2efe37aaf02f0ecf7de1ede58b48
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
aluka1994/textvqa
|
08a16c9b21ea9c5eca05f5d4d1763c190d2d7275
|
694cb2be08def519ba73be78e34664afa2c607b5
|
refs/heads/master
| 2021-05-26T23:44:21.973827
| 2020-04-08T22:05:58
| 2020-04-08T22:05:58
| 254,190,630
| 0
| 0
|
MIT
| 2020-04-08T20:14:11
| 2020-04-08T20:14:10
| null |
UTF-8
|
Python
| false
| false
| 5,380
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import random
import unittest
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
from torch.utils.data.sampler import SequentialSampler
from torch.utils.data.sampler import RandomSampler
from maskrcnn_benchmark.data.samplers import GroupedBatchSampler
from maskrcnn_benchmark.data.samplers import IterationBasedBatchSampler
class SubsetSampler(Sampler):
    """Deterministic sampler that yields a fixed sequence of indices."""

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        # Yield indices one at a time, in the given order.
        for idx in self.indices:
            yield idx

    def __len__(self):
        return len(self.indices)
class TestGroupedBatchSampler(unittest.TestCase):
    """Behavioral tests for GroupedBatchSampler ordering/grouping rules."""

    def test_respect_order_simple(self):
        """Flattened batches over aligned groups reproduce sampler order."""
        drop_uneven = False
        dataset = [i for i in range(40)]
        # Four groups of ten consecutive ids each.
        group_ids = [i // 10 for i in dataset]
        sampler = SequentialSampler(dataset)
        for batch_size in [1, 3, 5, 6]:
            batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
            result = list(batch_sampler)
            merged_result = list(itertools.chain.from_iterable(result))
            self.assertEqual(merged_result, dataset)

    def test_respect_order(self):
        """Interleaved group ids are batched per group, preserving order."""
        drop_uneven = False
        dataset = [i for i in range(10)]
        group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        sampler = SequentialSampler(dataset)
        # One expected batching per batch_size below.
        expected = [
            [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
            [[0, 1, 3], [2, 4, 5], [6, 9], [7, 8]],
            [[0, 1, 3, 6], [2, 4, 5, 7], [8], [9]],
        ]
        for idx, batch_size in enumerate([1, 3, 4]):
            batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
            result = list(batch_sampler)
            self.assertEqual(result, expected[idx])

    def test_respect_order_drop_uneven(self):
        """drop_uneven=True discards the trailing partial batch of each group."""
        batch_size = 3
        drop_uneven = True
        dataset = [i for i in range(10)]
        group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        sampler = SequentialSampler(dataset)
        batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
        result = list(batch_sampler)
        expected = [[0, 1, 3], [2, 4, 5]]
        self.assertEqual(result, expected)

    def test_subset_sampler(self):
        """A restricted sampler only batches the indices it yields."""
        batch_size = 3
        drop_uneven = False
        dataset = [i for i in range(10)]
        group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        sampler = SubsetSampler([0, 3, 5, 6, 7, 8])
        batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
        result = list(batch_sampler)
        expected = [[0, 3, 6], [5, 7, 8]]
        self.assertEqual(result, expected)

    def test_permute_subset_sampler(self):
        """Sampler order (not dataset order) drives batch ordering."""
        batch_size = 3
        drop_uneven = False
        dataset = [i for i in range(10)]
        group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        sampler = SubsetSampler([5, 0, 6, 1, 3, 8])
        batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
        result = list(batch_sampler)
        expected = [[5, 8], [0, 6, 1], [3]]
        self.assertEqual(result, expected)

    def test_permute_subset_sampler_drop_uneven(self):
        """Permuted sampler with drop_uneven keeps only the full batch."""
        batch_size = 3
        drop_uneven = True
        dataset = [i for i in range(10)]
        group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        sampler = SubsetSampler([5, 0, 6, 1, 3, 8])
        batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
        result = list(batch_sampler)
        expected = [[0, 6, 1]]
        self.assertEqual(result, expected)

    def test_len(self):
        """len() must agree with the realized batch count, before and after iterating."""
        batch_size = 3
        drop_uneven = True
        dataset = [i for i in range(10)]
        group_ids = [random.randint(0, 1) for _ in dataset]
        sampler = RandomSampler(dataset)
        batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
        result = list(batch_sampler)
        self.assertEqual(len(result), len(batch_sampler))
        # NOTE(review): duplicated assertion below -- checks len() is stable
        # after a second call (may be intentional; harmless either way).
        self.assertEqual(len(result), len(batch_sampler))
        batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
        # len() queried *before* iterating must also match.
        batch_sampler_len = len(batch_sampler)
        result = list(batch_sampler)
        self.assertEqual(len(result), batch_sampler_len)
        self.assertEqual(len(result), len(batch_sampler))
class TestIterationBasedBatchSampler(unittest.TestCase):
    """Tests that IterationBasedBatchSampler cycles the wrapped sampler."""

    def test_number_of_iters_and_elements(self):
        """Yields exactly num_iterations batches, wrapping around the data."""
        for batch_size in [2, 3, 4]:
            for num_iterations in [4, 10, 20]:
                for drop_last in [False, True]:
                    dataset = [i for i in range(10)]
                    sampler = SequentialSampler(dataset)
                    batch_sampler = BatchSampler(sampler, batch_size, drop_last=drop_last)
                    iter_sampler = IterationBasedBatchSampler(batch_sampler, num_iterations)
                    assert len(iter_sampler) == num_iterations
                    for i, batch in enumerate(iter_sampler):
                        # Expected batch restarts from the head once the
                        # wrapped sampler is exhausted (modulo its length).
                        start = (i % len(batch_sampler)) * batch_size
                        end = min(start + batch_size, len(dataset))
                        expected = [x for x in range(start, end)]
                        self.assertEqual(batch, expected)
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
|
[
"anandkumar@instance-1.us-central1-a.c.andromanit.internal"
] |
anandkumar@instance-1.us-central1-a.c.andromanit.internal
|
ca2706a8d5c4c223e9c842547960c37338378f89
|
7f6395e060bc6c50ff37fb95733daec15473c85a
|
/app/migrations/0003_alter_loja_id.py
|
c9434c453421ee51ee5bea468b0aa034f5a2b064
|
[] |
no_license
|
Anaveronica3001/Projeto-LuizaCode
|
c4e9647b021dc84875894e414750ea07b8dd3037
|
77599b94c2b7ef96432a26abc1f43e652cb3520f
|
refs/heads/main
| 2023-07-19T02:56:26.043084
| 2021-09-27T22:18:43
| 2021-09-27T22:18:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 3.2.7 on 2021-09-23 22:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make loja.id an explicit (non-auto) integer PK."""

    dependencies = [
        ('app', '0002_alter_loja_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='loja',
            name='id',
            # serialize=False: primary keys are excluded from serialization.
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
    ]
|
[
"rachelacarvalho@gmail.com"
] |
rachelacarvalho@gmail.com
|
7e2d327f6e6b8b82fe4642dbba754763aa5129d9
|
c7bb6a3b348d34f4cb117f13cfd36172dff02d2e
|
/apps/vaccinations/permissions.py
|
c569fbe8705d804c89d39178c0d4a450b203123c
|
[] |
no_license
|
santosronald/Briana
|
2a6a2e97611981329abc2b374dd23ec83fa71b13
|
7ad74ca05cea9e2b7754917fd89bc066966651c0
|
refs/heads/master
| 2021-01-23T10:36:02.926304
| 2017-04-20T20:10:33
| 2017-04-20T20:10:33
| 93,078,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
from django.core.urlresolvers import resolve
from rest_framework.permissions import BasePermission
from apps.control.models import UserChild
from apps.vaccinations.models import AppliedVaccine
__author__ = 'klaatu'
class CanSeeAppliedVaccines(BasePermission):
    """Grant access only to relatives linked to the child in the URL."""

    def has_permission(self, request, view):
        child_id = view.kwargs["child_pk"]
        link = UserChild.objects.filter(child=child_id, relative=request.user)
        return link.exists()
class CanSeeAppliedVaccine(BasePermission):
    """Grant access when the user is a relative AND the vaccine belongs to the child."""

    def has_object_permission(self, request, view, obj):
        child_id = view.kwargs["child_pk"]
        # Same short-circuit order as before: relative check first.
        is_relative = UserChild.objects.filter(child=child_id, relative=request.user).exists()
        if not is_relative:
            return False
        return AppliedVaccine.objects.filter(child=child_id, id=view.kwargs["pk"]).exists()
|
[
"erikd.guiba@gmail.com"
] |
erikd.guiba@gmail.com
|
d74a5ebdd3be83c433b05938cc73cf77d5130c3e
|
32db6e22f06523fba36988fd0456d365331f4322
|
/CSNet/dataset.py
|
6dee37f934e5edc21ad21dbb2d125c450d479039
|
[] |
no_license
|
Taylister/TGNet-Datagen
|
bcb8627ae8bb8c775a2ef52c7890433cce7a30b6
|
f528d3cacc40020fc3dcc27801587b8127cb3756
|
refs/heads/main
| 2023-03-11T11:54:32.634692
| 2021-03-01T07:13:48
| 2021-03-01T07:13:48
| 319,907,358
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,770
|
py
|
# author: Niwhskal
# github : https://github.com/Niwhskal/SRNet
import os
from skimage import io
from skimage.transform import resize
import numpy as np
import random
import cfg
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
class CSNet_dataset(Dataset):
    """Paired (input image, text mask) dataset for CSNet.

    Parameters
    ----------
    cfg : config object exposing data_dir, i_s_dir, mask_t_dir, batch_size,
        data_shape, train_data_dir and test_data_dir.
    torp : str
        Which split to load: 'train' or 'test'.
    transforms : unused; kept for backward interface compatibility.
    """

    def __init__(self, cfg, torp, transforms=None):
        self.data_dir = cfg.data_dir
        self.i_s_dir = cfg.i_s_dir
        self.batch_size = cfg.batch_size
        self.data_shape = cfg.data_shape
        self.torp = torp
        if self.torp == 'train':
            split_dir = cfg.train_data_dir
        elif self.torp == 'test':
            split_dir = cfg.test_data_dir
        else:
            # Previously an invalid torp silently left name_list undefined,
            # producing a confusing AttributeError later; fail fast instead.
            raise ValueError("torp must be 'train' or 'test', got %r" % (torp,))
        # Resolve all paths once from the cfg passed in.  The previous version
        # listed files with this cfg but rebuilt paths in __getitem__ from the
        # global `cfg` module, which could disagree with the argument.
        self._i_s_root = os.path.join(self.data_dir, split_dir, self.i_s_dir)
        self._mask_root = os.path.join(self.data_dir, split_dir, cfg.mask_t_dir)
        self.name_list = os.listdir(self._i_s_root)

    def __len__(self):
        return len(self.name_list)

    def __getitem__(self, idx):
        img_name = self.name_list[idx]
        i_s = io.imread(os.path.join(self._i_s_root, img_name))
        # The text mask is read as a single-channel grayscale image.
        mask_t = io.imread(os.path.join(self._mask_root, img_name), as_gray=True)
        return [i_s, mask_t]
class Example_dataset(Dataset):
    """Example-image dataset: images resized to the configured height.

    Yields (image, basename) pairs, optionally passed through `transform`.
    """

    def __init__(self, data_dir = cfg.example_data_dir, transform = None):
        self.data_dir = data_dir
        self.name_list = os.listdir(self.data_dir)
        self.transform = transform

    def __len__(self):
        return len(self.name_list)

    def __getitem__(self, idx):
        fname = self.name_list[idx]
        image = io.imread(os.path.join(self.data_dir, fname))
        height, width = image.shape[:2]
        # Scale so height == cfg.data_shape[0]; width rounds to a multiple of 8.
        ratio = cfg.data_shape[0] / height
        target_h = cfg.data_shape[0]
        target_w = int(round(int(width * ratio) / 8)) * 8
        image = resize(image, (target_h, target_w), preserve_range=True)
        sample = (image, fname.split('.')[0])
        return self.transform(sample) if self.transform else sample
class To_tensor(object):
    """Convert an (HWC image, name) sample into a normalized CHW float tensor."""

    def __call__(self, sample):
        image, name = sample
        # HWC -> CHW, then rescale pixel values from [0, 255] to [-1, 1].
        chw = image.transpose((2, 0, 1)) / 127.5 - 1
        tensor = torch.from_numpy(chw)
        return (tensor.float(), name)
|
[
"track.and.field.c.love1203@gmail.com"
] |
track.and.field.c.love1203@gmail.com
|
f1278b3e1bc553043bd35ab9b750d6e8f12e1767
|
af62843e2af3ea6550f91372139dba31f9c58f7a
|
/markment/engine.py
|
a042be9af5bb87f438f80516ab4ffe4c9f5b1721
|
[
"MIT"
] |
permissive
|
hltbra/markment
|
e276c24ac9ce628bdd3a29f18732c41cf243d70e
|
59976c2fee9787c96aec7c4a9074ad0d2a904388
|
refs/heads/master
| 2021-01-18T06:17:34.395303
| 2013-05-27T17:21:26
| 2013-05-27T17:21:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,292
|
py
|
# -*- coding: utf-8 -*-
# <markment - markdown-based documentation generator for python>
# Copyright (C) <2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from copy import deepcopy
from misaka import HtmlRenderer, SmartyPants, Markdown
from misaka import (
EXT_FENCED_CODE,
EXT_NO_INTRA_EMPHASIS,
EXT_SUPERSCRIPT,
EXT_AUTOLINK,
EXT_TABLES,
HTML_USE_XHTML,
HTML_SMARTYPANTS,
)
from lxml import html as lhtml
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import HtmlFormatter
from .events import after
from .handy import slugify
class MarkmentRenderer(HtmlRenderer, SmartyPants):
    """Misaka HTML renderer that additionally records a document index,
    prefixes relative URLs, and pygmentizes fenced code blocks."""

    def setup(self):
        """Initialize per-document state (invoked by misaka)."""
        super(MarkmentRenderer, self).setup()
        self.markment_indexes = []  # tree of {text, anchor, level[, child]} dicts
        self.url_prefix = None      # string or callable applied to relative links
        self.code_count = {'text': '', 'count': 0}  # code blocks seen per header
        self.url_references = []    # every prefixed URL that was emitted

    def last_index_plus_child(self, level):
        """Return the 'child' list nested *level* levels deep in the index
        tree, creating intermediate 'child' lists as needed."""
        indexes = self.markment_indexes
        for _ in range(level):
            try:
                last_index = indexes[-1]
            except IndexError:
                break
            if 'child' not in last_index:
                last_index['child'] = []
            indexes = last_index['child']
        return indexes

    def count_index_for_header(self, text):
        """Count consecutive code blocks rendered under the same header."""
        if self.code_count['text'] == text:
            self.code_count['count'] += 1
        else:
            self.code_count['text'] = text
            self.code_count['count'] = 1
        return self.code_count['count']

    def prefix_link_if_needed(self, link):
        """Apply self.url_prefix to a relative *link*.

        Returns the prefixed URL (and records it in self.url_references),
        or '' when no prefixing applies (no prefix configured, or the link
        is already absolute)."""
        needs_prefix = '://' not in link and not link.startswith('//')
        if not self.url_prefix:
            return ''
        if needs_prefix:
            if callable(self.url_prefix):
                prefixed = self.url_prefix(link)
            else:
                prefix = self.url_prefix.rstrip('/') + '/'
                prefixed = prefix + link.lstrip('/')
            self.url_references.append(prefixed)
        else:
            prefixed = ''
        return prefixed

    def table(self, header, body):
        """Render a markdown table wrapped in Bootstrap's .table class."""
        table = "".join([
            '<table class="table">',
            '<thead>', header.strip(), '</thead>',
            '<tbody>', body.strip(), '</tbody>',
            '</table>',
        ])
        memory = {'element': table}
        # NOTE(review): this uses after.shout(name, memory) while the other
        # callbacks use after.<name>.shout(memory) -- confirm both forms are
        # supported by the events module.
        after.shout('markdown_table', memory)
        return memory['element']

    def image(self, link, title, alt):
        """Render an <img>, prefixing relative src URLs."""
        url = link
        prefixed = self.prefix_link_if_needed(link)
        if prefixed:
            url = prefixed
        element = '<img src="{0}" title="{1}" alt="{2}" />'.format(
            url,
            title,
            alt
        )
        memory = {'element': element}
        after.markdown_image.shout(memory)
        return memory['element']

    def link(self, link, title, content):
        """Render an <a>, prefixing relative href URLs."""
        url = link
        prefixed = self.prefix_link_if_needed(link)
        if prefixed:
            url = prefixed
        # BUGFIX: the original built the attribute with
        # 'title="{1}"'.format(title), which raises IndexError for any
        # non-empty title (only one positional argument) and also lacked the
        # space separating href="..." from title="...".
        element = '<a href="{0}"{1}>{2}</a>'.format(
            url,
            ' title="{0}"'.format(title) if title else '',
            content,
        )
        memory = {'element': element}
        after.markdown_link.shout(memory)
        return memory['element']

    def header(self, text, level):
        """Render a heading with a slug anchor and record it in the index."""
        item = {
            'text': str(text),
            'anchor': '#{0}'.format(slugify(text)),
            'level': int(level),
        }
        indexes = self.markment_indexes
        if level > 1:
            # Nest sub-headers under the most recent higher-level header.
            indexes = self.last_index_plus_child(level - 1)
        indexes.append(item)
        element = '<h{level} id="{slug}" name="{slug}"><a href="#{slug}">{text}</a></h{level}>'.format(
            level=level,
            text=text,
            slug=slugify(text)
        )
        memory = {'element': element}
        after.markdown_header.shout(memory)
        return memory['element']

    def add_attributes_to_code(self, code):
        """Name the highlighted <pre> after the last seen header so examples
        can be addressed as '<header-slug>-example-<n>'."""
        dom = lhtml.fromstring(code)
        pre = dom.cssselect("div.highlight pre")[0]
        if self.markment_indexes:
            last_header = self.markment_indexes[-1]
            slug_prefix = slugify(last_header['text'])
            pre.attrib['name'] = "{0}-example-{1}".format(
                slug_prefix,
                self.count_index_for_header(last_header['text'])
            )
        return lhtml.tostring(dom)

    def block_code(self, text, lang):
        """Pygmentize a fenced code block; guess the lexer when no language
        is declared."""
        if lang:
            lexer = get_lexer_by_name(lang, stripall=True)
        else:
            lexer = guess_lexer(text, stripall=True)
        formatter = HtmlFormatter()
        code = self.add_attributes_to_code(highlight(text, lexer, formatter))
        memory = {'element': code}
        after.markdown_code.shout(memory)
        return memory['element']
class Markment(object):
    """Compile a markdown string through MarkmentRenderer, keeping the
    rendered HTML, the document index, and the collected URL references."""

    extensions = (EXT_FENCED_CODE | EXT_NO_INTRA_EMPHASIS | HTML_SMARTYPANTS |
                  EXT_TABLES | EXT_AUTOLINK | EXT_SUPERSCRIPT | HTML_USE_XHTML)

    def __init__(self, markdown, renderer=None, url_prefix=None):
        self.raw = markdown
        if renderer:
            self.renderer = renderer
        else:
            self.renderer = MarkmentRenderer()
        self.renderer.url_prefix = url_prefix
        self.markdown = Markdown(self.renderer, extensions=self.extensions)
        self.rendered = self.compile()
        self.url_references = self.renderer.url_references

    def compile(self):
        """Render the raw markdown to HTML."""
        return self.markdown.render(self.raw)

    def index(self):
        """Return a deep copy of the document index gathered while rendering."""
        return deepcopy(self.renderer.markment_indexes)
|
[
"gabriel@nacaolivre.org"
] |
gabriel@nacaolivre.org
|
20a269ebaedbbf51b0d2ea1c0a32b4d85f91a46d
|
5de810ae0383d04957e14863f502d4d64345c8db
|
/node_modules/dtrace-provider/src/build/config.gypi
|
1e79170c4815154c1c845e230751e851cec78c0c
|
[
"BSD-2-Clause"
] |
permissive
|
GoestaHuppenbauer/PortfolioBot
|
c30fdb9f86dcf5f2f5bcd7de900e4bc5b6db8a1b
|
4debe72ee58f4d2c27c59eb7655ad5497d1026e5
|
refs/heads/master
| 2022-12-12T22:44:47.938483
| 2019-12-01T15:07:54
| 2019-12-01T15:07:54
| 225,156,368
| 0
| 0
| null | 2022-12-06T15:33:44
| 2019-12-01T12:17:58
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,506
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"llvm_version": "0",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "64.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/gostahuppenbauer/.node-gyp/10.16.3",
"standalone_static_library": 1,
"dry_run": "",
"save_dev": "",
"legacy_bundling": "",
"only": "",
"viewer": "man",
"browser": "",
"commit_hooks": "true",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"timing": "",
"init_license": "ISC",
"if_present": "",
"init_author_email": "",
"sign_git_tag": "",
"cache_max": "Infinity",
"cert": "",
"local_address": "",
"long": "",
"git_tag_version": "true",
"preid": "",
"fetch_retries": "2",
"noproxy": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"prefer_online": "",
"always_auth": "",
"logs_max": "10",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"heading": "npm",
"audit_level": "low",
"offline": "",
"searchlimit": "20",
"read_only": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"engine_strict": "",
"description": "true",
"https_proxy": "",
"userconfig": "/Users/gostahuppenbauer/.npmrc",
"init_module": "/Users/gostahuppenbauer/.npm-init.js",
"cidr": "",
"user": "",
"node_version": "10.16.3",
"save": "true",
"editor": "vi",
"ignore_prepublish": "",
"auth_type": "legacy",
"tag": "latest",
"script_shell": "",
"before": "",
"global": "",
"progress": "true",
"searchstaleness": "900",
"ham_it_up": "",
"optional": "true",
"save_prod": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"ca": "",
"tag_version_prefix": "v",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"save_prefix": "^",
"dev": "",
"group": "20",
"save_exact": "",
"fetch_retry_factor": "10",
"prefer_offline": "",
"cache_lock_stale": "60000",
"version": "",
"otp": "",
"cache_min": "10",
"cache": "/Users/gostahuppenbauer/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/6.9.0 node/v10.16.3 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"umask": "0022",
"node_options": "",
"init_version": "1.0.0",
"git": "git",
"scope": "",
"init_author_name": "",
"tmp": "/var/folders/1l/5bhczk952tz3zb661w5vv89h0000gn/T",
"onload_script": "",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"40520263+GoestaHuppenbauer@users.noreply.github.com"
] |
40520263+GoestaHuppenbauer@users.noreply.github.com
|
f768a17d9375124b1b9dc3913ee75a866813d920
|
24cbb30b8527ad7a41aaa01a59ee3fcf4f67f24a
|
/limitsetting/theta/analysis_tmassfit_CNN_combo.py
|
8b086c0f575dc17330947bea05b329929143bf68
|
[] |
no_license
|
knash/NanoAODskimAna
|
01bfb4cb7152f707af8204c6d0da2877022767f5
|
adcbfae126775f272fb8be8fea3a2b583f56dd9e
|
refs/heads/master
| 2021-07-08T10:08:45.901977
| 2020-11-12T20:07:53
| 2020-11-12T20:07:53
| 209,535,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,223
|
py
|
# -*- coding: utf-8 -*-
import scipy.interpolate
import ROOT
def histogram_filter_partial(hname):
    """Keep a histogram unless its name contains any excluded token."""
    excluded = ("q2", "tptrw", "btag", "semi_tag")
    return not any(token in hname for token in excluded)
def histogram_filter_all(hname):
    """Keep every histogram except the 'semi_tag' ones."""
    return "semi_tag" not in hname
def histogram_filter_sigonly(hname):
    """Keep TT signal/bmerge histograms plus nominal (non-systematic) ones."""
    if "q2" in hname:
        return False
    # Signal-like histograms are kept even when they carry a systematic tag.
    if "TT_sig" in hname or "TT_bmerge" in hname:
        return True
    if "minus" in hname or "plus" in hname:
        return False
    return "semi_tag" not in hname
def histogram_filter_nosyst(hname):
    """Drop minus/plus systematic variations (except '_tag' parameters) and
    every 'semi_tag' histogram."""
    is_systematic = "minus" in hname or "plus" in hname
    if is_systematic and "_tag" not in hname:
        return False
    return "semi_tag" not in hname
def histogram_filter_none(hname):
    """Keep only nominal histograms (no minus/plus systematic variants)."""
    return not ("minus" in hname or "plus" in hname)
# Global configuration for the fit below.
TT_SF_global=1.0  # nominal TT tag scale factor
sigstr=""  # key used for the (empty) signal process group in theta-auto calls
# Exactly one unc* flag is expected to be True; it selects which histogram
# filter and rate-uncertainty scheme build_allhad_model() applies.
unc1=False
unc2=False
unc3=True
unc4=False
#bsum=True
def build_allhad_model(TT_SF_local=1.0,ltype="Mu",cfile=""):
    """Build a theta statistical model for one lepton channel / pt bin.

    TT_SF_local -- tag scale factor applied to the TT_sig pass region
    ltype       -- lepton channel substituted into the file name ("Mu"/"Ele")
    cfile       -- theta input ROOT file; its "Pt....root" part encodes the bin

    Relies on theta-auto globals (build_model_from_rootfile, ...) and the
    module-level unc1..unc4 flags.  NOTE(review): Python 2 syntax.
    """
    files = [cfile]
    print "cfile",cfile
    # Pt-bin label (e.g. "Pt575to650_") extracted from the file name.
    exstr=cfile[cfile.find("Pt"):cfile.find(".root")]+"_"
    #HACKY! I HATE MYSELF
    files[0]=files[0].replace("Ele",ltype).replace("Mu",ltype)
    thefile=ROOT.TFile(files[0])
    #model = build_model_from_rootfile(files, histogram_filter_all,include_mc_uncertainties=True)
    # The unc* flag chooses which histogram filter builds the model.
    if unc1:
        model = build_model_from_rootfile(files, histogram_filter_sigonly,include_mc_uncertainties=True)
    elif unc2:
        model = build_model_from_rootfile(files, histogram_filter_nosyst,include_mc_uncertainties=True)
    elif unc3:
        model = build_model_from_rootfile(files, histogram_filter_partial,include_mc_uncertainties=True)
    elif unc4:
        model = build_model_from_rootfile(files, histogram_filter_none,include_mc_uncertainties=True)
    model.fill_histogram_zerobins()
    print dir(model)
    # Cache the nominal yield of every process/observable pair.
    evs = {}
    for pr in model.processes:
        print pr
        if not (pr in evs):
            evs[pr] = {}
        for ob in model.observables:
            name1 = ob+"__"+pr
            evs[pr][ob] = thefile.Get(name1).Integral()
            # Debug printout: yield shift of each systematic variation.
            for par in model.distribution.get_parameters():
                try:
                    print "par",par,thefile.Get(name1+"__"+par+"__minus").Integral()-evs[pr][ob],thefile.Get(name1+"__"+par+"__plus").Integral()-evs[pr][ob]
                except:
                    print "none"
    TT_SF=TT_SF_local
    if not unc4:
        # Rescale TT_sig: divide the pass region by the SF and move the
        # difference into the fail region so the total yield is conserved.
        evs['TT_sig']["mtop_"+exstr+ltype+"pass"]*=1.0/TT_SF
        factor = (evs['TT_sig']["mtop_"+exstr+ltype+"fail"]+evs['TT_sig']["mtop_"+exstr+ltype+"pass"]*TT_SF-evs['TT_sig']["mtop_"+exstr+ltype+"pass"])/evs['TT_sig']["mtop_"+exstr+ltype+"fail"]
        evs['TT_sig']["mtop_"+exstr+ltype+"fail"]*=factor
        print "factors",factor,TT_SF
        model.scale_predictions(1.0/TT_SF,'TT_sig',"mtop_"+exstr+ltype+"pass")
        model.scale_predictions(factor,'TT_sig',"mtop_"+exstr+ltype+"fail")
    #model.scale_predictions((evs["QCD"]["mtop_"+exstr+ltype+"pass"]+evs["WJetsToLNu"]["mtop_"+exstr+ltype+"pass"])/evs["WJetsToLNu"]["mtop_"+exstr+ltype+"pass"],"WJetsToLNu","mtop_"+exstr+ltype+"pass")
    #model.scale_predictions((evs["QCD"]["mtop_"+exstr+ltype+"fail"]+evs["WJetsToLNu"]["mtop_"+exstr+ltype+"fail"])/evs["WJetsToLNu"]["mtop_"+exstr+ltype+"fail"],"WJetsToLNu","mtop_"+exstr+ltype+"fail")
    #model.scale_predictions(0.0,"QCD","*")
    # 20% log-normal rate uncertainty applied below to every process.
    floatnum=1.2
    for ob in model.observables:
        print ob
        lepst=""
        if ob.find("Ele")!=-1:
            lepst="Ele"
        if ob.find("Mu")!=-1:
            lepst="Mu"
        # Pt-bin and pass/fail suffix parsed out of the observable name.
        ptstr=ob[ob.find("Pt"):ob.find("_CNN")]
        passf=ob[-4:]
        if ob.find("Mu")!=-1:
            lepst="Mu"
        for pr in model.processes:
            ptype=pr
            if unc1 or unc3:
                # Backgrounds share one rate parameter per bin/channel.
                if pr=="QCD" or pr=="WJetsToLNu" or pr=="TT":
                    ptype="bkg"
                    model.add_lognormal_uncertainty(ptype+'_rate'+exstr+lepst, math.log(floatnum), pr,ob)
                if pr in ["TT_semi"]:
                    model.add_lognormal_uncertainty(pr+'_rate'+exstr+lepst, math.log(floatnum), pr,ob)
            if unc2:
                if pr=="QCD" or pr=="WJetsToLNu" or pr=="TT":
                    ptype="bkg"
                    model.add_lognormal_uncertainty(ptype+'_rate'+exstr+lepst, math.log(floatnum), pr,ob)
                if pr in ["TT_semi"]:
                    model.add_lognormal_uncertainty(pr+'_rate'+ob, math.log(floatnum), pr,ob)
            if unc4:
                if pr=="QCD" or pr=="WJetsToLNu" or pr=="TT":
                    ptype="bkg"
                    model.add_lognormal_uncertainty(ptype+'_rate'+ptstr+passf, math.log(floatnum), pr,ob)
                else:
                    model.add_lognormal_uncertainty(pr+'_rate'+ptstr+passf, math.log(floatnum), pr,ob)
            if unc1:
                if pr.find("TT")!=-1 :
                    ptype="TT"
                    model.add_lognormal_uncertainty(ptype+'_rate'+exstr+lepst, math.log(floatnum), pr,ob)
            elif unc2:
                if pr.find("TT")!=-1 and pr!="TT" and pr!="TT_semi" :
                    ptype="TT"
                    model.add_lognormal_uncertainty(pr+'_rate'+exstr+lepst, math.log(floatnum), pr,ob)
            elif unc3:
                if pr.find("TT")!=-1 and pr!="TT" and pr!="TT_semi" :
                    ptype="TT"
                    model.add_lognormal_uncertainty(pr+'_rate'+exstr+lepst, math.log(floatnum), pr,ob)
    return model
#SFstring = "SFemu",str(TT_SF).replace(".","p")
# --- Top-level fit driver (theta-auto, Python 2) -------------------------
# Builds and combines the per-channel/per-bin models, runs a maximum-
# likelihood fit to data, and writes pre-/post-fit histograms plus one
# up/down variation file per fit parameter.
deltaSF = 99999.0
cursfs = [0.8,0.82,0.84,0.86,0.88,0.9,0.92,0.94,0.96,0.98,1.0,1.02,1.04,1.06,1.08,1.1,1.12,1.14,1.16,1.18,1.2]
cursfs = [1.0]
ncount=0
# "RFILE" is substituted upstream with a comma-separated list of pt selections.
ptsels="RFILE".split(",")
for cursf in cursfs:
    # One model per (lepton channel, pt bin); combine them into a single fit.
    models=[]
    for iff,ff in enumerate(ptsels):
        #print ff
        models.append(build_allhad_model(cursf,"Mu","ThetaFile_ttfit_Mu"+ff+".root"))
        models.append(build_allhad_model(cursf,"Ele","ThetaFile_ttfit_Ele"+ff+".root"))
    for iff,ff in enumerate(models):
        if iff==0:
            model=models[iff]
        else:
            model.combine(models[iff])
    # Widen standard-normal priors; let tag/rate parameters float freely.
    for p in model.distribution.get_parameters():
        d = model.distribution.get_distribution(p)
        if d['typ'] == 'gauss' and d['mean'] == 0.0 and d['width'] == 1.0:
            model.distribution.set_distribution_parameters(p, range = [-5.0, 5.0])
        print "dist",p
        #if p.find("_tag")!=-1:
        if p.find("_tag")!=-1 or p.find("_rate")!=-1:
            print "float"
            model.distribution.set_distribution(p, typ = "gauss",mean = 0.0,width = inf, range = [-10.,10.])
    # Suffix for all output file names.
    posttext = "ALL"
    if str(cursf)!="1.0":
        posttext+=str(cursf).replace(".","p")
    signal_process_groups = {sigstr: [sigstr]}
    myopts = Options()
    myopts.set('minimizer', 'strategy', 'robust')
    myopts.set('minimizer', 'minuit_tolerance_factor', '40')
    myopts.set('minimizer', 'mcmc_iterations', '100000')
    print "running",cursf
    #parVals = mle(model, input = 'data', n=20, signal_process_groups = signal_process_groups,signal_prior='fix:1.0',options=myopts)
    #ngroups=4
    #obl=[]
    #for ib,ob in enumerate(sorted(model.observables)):
    #    print ib,ob,ib%ngroups
    #    obl.append(ob)
    #    if len(obl)==ngroups:
    #        print obl
    #        copym=copy.deepcopy(model)
    #        copym.restrict_to_observables(obl)
    #        parVals = mle(copym, input = 'data', n=1, with_error = True, signal_process_groups = {'': []},chi2=True,options=myopts)
    #        print parVals
    # Background-only maximum-likelihood fit to data.
    parVals = mle(model, input = 'data', n=1, with_error = True, signal_process_groups = {'': []},chi2=True,options=myopts)
    #parVals = bayesian_posterior_model_prediction(model, input = 'data', n=20, signal_process_groups={'': []}, options=myopts)
    #print pl_interval(model, input = 'data', n=1,cls = [cl_1sigma] ,signal_process_groups = {'': []}, options=myopts,parameter = 'TT_sig_tag_Pt575to650_CNN0p9')
    print "-"*20
    print parVals
    print "-"*20
    # Post-fit prediction at the best-fit parameter point.
    parameter_values = {}
    for p in model.get_parameters([]):
        #print p
        parameter_values[p] = parVals[sigstr][p][0][0]
    print parameter_values
    histos = evaluate_prediction(model, parameter_values, include_signal = True)
    print "dirh",dir(histos)
    write_histograms_to_rootfile(histos, 'histos-CNN-mle_syst'+posttext+'.root')
    # Post-fit prediction with the TT_sig tag parameters forced to zero.
    parameter_values_AB = {}
    for p in model.get_parameters([]):
        #print p
        if p=="beta_signal":
            parameter_values_AB[p] = 1.0
        elif p.find("TT_sig_tag")!=-1:
            parameter_values_AB[p] = 0.0
        else:
            parameter_values_AB[p] = parVals[sigstr][p][0][0]
    #print parameter_values_AB
    histos = evaluate_prediction(model, parameter_values_AB, include_signal = True)
    write_histograms_to_rootfile(histos, 'histos-CNN-mle_syst-AB'+posttext+'.root')
    # All parameters shifted up by one fit uncertainty at once.
    parameter_values1 = {}
    for p in model.get_parameters([]):
        parameter_values1[p] = parVals[sigstr][p][0][0]+parVals[sigstr][p][0][1]
    histos1 = evaluate_prediction(model, parameter_values1, include_signal = True)
    write_histograms_to_rootfile(histos1, 'histos-CNN-mleup_syst'+posttext+'.root')
    #print "post fit full up: ",parameter_values1
    # ... and down by one fit uncertainty.
    parameter_values2 = {}
    for p in model.get_parameters([]):
        parameter_values2[p] = parVals[sigstr][p][0][0]-parVals[sigstr][p][0][1]
    histos2 = evaluate_prediction(model, parameter_values2, include_signal = True)
    write_histograms_to_rootfile(histos2, 'histos-CNN-mledown_syst'+posttext+'.root')
    #print
    #print "Up"
    # Post-fit per-parameter up variations.
    for ip in model.get_parameters([]):
        parameter_values_up = {}
        for jp in model.get_parameters([]):
            if jp==ip:
                parameter_values_up[jp] = parVals[sigstr][jp][0][0]+parVals[sigstr][jp][0][1]
            else:
                parameter_values_up[jp] = parVals[sigstr][jp][0][0]
        #print ip
        #print parameter_values_up
        histos_up = evaluate_prediction(model, parameter_values_up, include_signal = True)
        write_histograms_to_rootfile(histos_up, 'postfithistos/histos-CNN-'+ip+'up_syst'+posttext+'.root')
    #print
    #print "Down"
    # Post-fit per-parameter down variations.
    for ip in model.get_parameters([]):
        parameter_values_down = {}
        for jp in model.get_parameters([]):
            if jp==ip:
                parameter_values_down[jp] = parVals[sigstr][jp][0][0]-parVals[sigstr][jp][0][1]
            else:
                parameter_values_down[jp] = parVals[sigstr][jp][0][0]
        #print ip
        #print parameter_values_down
        histos_down = evaluate_prediction(model, parameter_values_down, include_signal = True)
        write_histograms_to_rootfile(histos_down, 'postfithistos/histos-CNN-'+ip+'down_syst'+posttext+'.root')
    #for p in model.distribution.get_parameters():s
    #    d = model.distribution.get_distribution(p)
    #    if p=="TT_sig_tag":
    #        print "lock"
    #        model.distribution.set_distribution(p, typ = "gauss",mean = 0.0,width = 0.00000000001, range = [-7.,7.])
    #parValspre = mle(model, input = 'data', n=1, signal_process_groups = signal_process_groups,signal_prior='fix:1.0',options=myopts)
    #print "Fit result: "
    #for ppp in parValspre[sigstr]:
    #    print ppp,"pre",parValspre[sigstr][ppp],"post",parVals[sigstr][ppp]
    #print "--"
    #print "Scale",parValspre[sigstr]["TT_sig_tag"]
    #print "--"
    # Pre-fit prediction: all nuisances at zero.
    parameter_values_pre = {}
    for p in model.get_parameters([]):
        if p=="beta_signal":
            parameter_values_pre[p] = 1.0
        else:
            parameter_values_pre[p] = 0.0
    #print parameter_values_pre
    histos = evaluate_prediction(model, parameter_values_pre, include_signal = True)
    write_histograms_to_rootfile(histos, 'histos-CNN-mle_syst-pre'+posttext+'.root')
    # Post-fit prediction with only the TT_sig tag parameters reset to zero.
    parameter_values_ON = {}
    for p in model.get_parameters([]):
        if p=="beta_signal":
            parameter_values_ON[p] = 1.0
        elif p.find("TT_sig_tag")!=-1:
            parameter_values_ON[p] = 0.0
        else:
            parameter_values_ON[p] = parVals[sigstr][p][0][0]
    #print parameter_values_ON
    histos = evaluate_prediction(model, parameter_values_ON, include_signal = True)
    write_histograms_to_rootfile(histos, 'histos-CNN-mle_syst-ON'+posttext+'.root')
    print
    print "Up"
    # "Pre-fit" per-parameter up variations (TT_sig tag held at zero).
    for ip in model.get_parameters([]):
        parameter_values_up = {}
        for jp in model.get_parameters([]):
            if jp==ip:
                parameter_values_up[jp] = parVals[sigstr][jp][0][0]+parVals[sigstr][jp][0][1]
            elif jp=="beta_signal":
                parameter_values_up[jp] = 1.0
            elif jp.find("TT_sig_tag")!=-1:
                parameter_values_up[jp] = 0.0
            else:
                parameter_values_up[jp] = parVals[sigstr][jp][0][0]
        #print ip
        #print parameter_values_up
        histos_up = evaluate_prediction(model, parameter_values_up, include_signal = True)
        write_histograms_to_rootfile(histos_up, 'prefithistos/histos-CNN-'+ip+'up_syst'+posttext+'.root')
    print
    print "Down"
    # "Pre-fit" per-parameter down variations (TT_sig tag held at zero).
    for ip in model.get_parameters([]):
        parameter_values_down = {}
        for jp in model.get_parameters([]):
            if jp==ip:
                parameter_values_down[jp] = parVals[sigstr][jp][0][0]-parVals[sigstr][jp][0][1]
            elif jp=="beta_signal":
                parameter_values_down[jp] = 1.0
            elif jp.find("TT_sig_tag")!=-1:
                parameter_values_down[jp] = 0.0
            else:
                parameter_values_down[jp] = parVals[sigstr][jp][0][0]
        #print ip
        #print parameter_values_down
        histos_down = evaluate_prediction(model, parameter_values_down, include_signal = True)
        write_histograms_to_rootfile(histos_down, 'prefithistos/histos-CNN-'+ip+'down_syst'+posttext+'.root')
    # Post-fit prediction with the TT_bmerge tag parameters forced to zero.
    parameter_values_AB = {}
    for p in model.get_parameters([]):
        #print p
        if p=="beta_signal":
            parameter_values_AB[p] = 1.0
        elif p.find("TT_bmerge_tag")!=-1:
            #print "bmerge"
            parameter_values_AB[p] = 0.0
        else:
            parameter_values_AB[p] = parVals[sigstr][p][0][0]
    #print parameter_values_AB
    histos = evaluate_prediction(model, parameter_values_AB, include_signal = True)
    write_histograms_to_rootfile(histos, 'histos-CNN-mle_syst-AB'+posttext+'_bmerge.root')
model_summary(model)
report.write_html('htmlout')
|
[
"knash201@gmail.com"
] |
knash201@gmail.com
|
bff30b9ffa84cf82990529d2205628711ad8b099
|
087db43af911ae0c2320187743f30a1fb1148e8a
|
/Week_8/2048_attempt/util.py
|
f89ff34896ae92c876878841d5912494d8270362
|
[] |
no_license
|
LuyandaGitHub/intro_python
|
00b6e1c02d67c00bb6f7dced3a837d23bc5ee4e6
|
317099ab01ba3228163b6a7459615d6b55d595c8
|
refs/heads/master
| 2022-11-27T01:43:51.970893
| 2020-07-30T12:45:59
| 2020-07-30T12:45:59
| 283,663,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,950
|
py
|
# IMPORT THE random MODULE
import random
# THIS FUNCTION WILL CREATE A GRID WITH 16 0'S
def create_grid(grid_param):
    """Append sixteen empty (0) cells to grid_param in place."""
    grid_param.extend([0] * 16)
# THIS FUNCTION WILL PRINT THE GRID
def print_grid(grid_param):
    """Print the grid as four ' | '-separated rows of four cells each.

    Cells beyond index 15 are ignored, matching a 4x4 board.
    """
    rows = ['', '', '', '']
    for idx, value in enumerate(grid_param):
        if idx < 16:
            rows[idx // 4] += str(value) + ' | '
    for row in rows:
        print(row)
# THIS FUNCTION WILL GENERATE A RANDOM NUMBER
def generate_next_position(grid_param):
    """Place a 2 in a random empty (0) cell, then check for game over.

    BUGFIX: the original recursed unconditionally when the chosen cell was
    occupied, so a full grid caused infinite recursion (RecursionError).
    Now a full grid goes straight to the loss check instead.
    """
    # No empty cell left: the game is over, do not try to place anything.
    if 0 not in grid_param:
        check_for_loss(grid_param)
        return
    # Pick a random cell; retry (recursively) until an empty one is hit.
    position_of_square = random.randint(0, len(grid_param) - 1)
    if grid_param[position_of_square] == 0:
        grid_param[position_of_square] = 2
        check_for_loss(grid_param)
    else:
        generate_next_position(grid_param)
# THIS FUNCTION IS GONNA COMBINE THE SQUARES
def merge_rows(grid_param):
    """Merge equal, non-zero horizontal neighbours within each 4-cell row.

    Fixes vs. the original:
    - stop at len-1 (the original indexed grid_param[i + 1] for the last i,
      raising IndexError on every call with a non-empty grid);
    - never merge across a row boundary (index 3 with index 4, etc.);
    - skip empty (0) pairs, which the original "merged" into 0;
    - call check_for_win(grid_param) instead of the undefined checkForWin().
    """
    for i in range(len(grid_param) - 1):
        # Rows are 4 cells wide; the last cell of a row has no right neighbour.
        if (i + 1) % 4 == 0:
            continue
        if grid_param[i] != 0 and grid_param[i] == grid_param[i + 1]:
            grid_param[i] = int(grid_param[i]) + int(grid_param[i + 1])
            grid_param[i + 1] = 0
            check_for_win(grid_param)
# THIS FUNCTION IS GONNA COMBINE THE SQUARES
def merge_columns(grid_param):
    """Merge equal, non-zero vertical neighbours (cells 4 apart).

    Fixes vs. the original:
    - skip empty (0) pairs, which the original "merged" into 0;
    - call check_for_win(grid_param) instead of the undefined checkForWin().
    Only indices 0..11 have a cell below them on a 4x4 board.
    """
    for i in range(12):
        if grid_param[i] != 0 and grid_param[i] == grid_param[i + 4]:
            grid_param[i] = int(grid_param[i]) + int(grid_param[i + 4])
            grid_param[i + 4] = 0
            check_for_win(grid_param)
# THIS FUNCTION WILL CHECK FOR A WIN
def check_for_win(grid_param):
    """Print a win message if any cell has reached 2048.

    Fixes vs. the original: the message typo ('YOU WINN!!!!') is corrected,
    and the message is printed at most once per call instead of once per
    winning tile.
    """
    for value in grid_param:
        if value == 2048:
            print('YOU WIN!!!!')
            return
# THIS FUNCTION WILL CHECK FOR A LOSS
def check_for_loss(grid_param):
    """Print a loss message when the grid has no empty (0) cells left."""
    empty_cells = sum(1 for value in grid_param if value == 0)
    if empty_cells == 0:
        print('YOU LOSE!!!')
|
[
"noreply@github.com"
] |
LuyandaGitHub.noreply@github.com
|
6035aff637448d6f342a96ae9243e4ce19471990
|
b1e059870a72cfe6ef90b9a5f615cb4b5aa4de41
|
/Chap 1/Image ginput.py
|
43187a12889075c52b6cf77746716fa6fd0eee57
|
[] |
no_license
|
Wangman1/python-computer-version
|
bfc679c00f5bdaab39c67098935f240be7f47cf1
|
aca121fa51e2cbb5d0c21f1252f1c12f6811ea90
|
refs/heads/master
| 2020-03-06T15:03:10.846086
| 2018-03-27T08:37:45
| 2018-03-27T08:37:45
| 126,947,754
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# Interactive demo: display an image and collect three mouse clicks
# (PCV book, chapter 1 exercise).
from PIL import Image
from pylab import *
# Load the example image into a numpy array.
im = array(Image.open('E:/python/Python Computer Vision/Image data/empire.jpg'))
imshow(im)
print('Please click 3 points')
# Block until the user clicks three points; returns a list of (x, y) tuples.
x = ginput(3)
print('you clicked:', x)
show()
|
[
"244280786@qq.com"
] |
244280786@qq.com
|
6e5ed45d8e324cd491dc14f45253b175fea26caa
|
50a6f90c46ee83e667de08be9c74acbaa792dbc5
|
/filefi/025/mc.py
|
57a87e7379c99302830a70f355932dc6c3a40a8d
|
[
"MIT"
] |
permissive
|
cpausmit/Kraken
|
e8f51a46e5d181e855bb9d2276b66c67e5842888
|
e79a19f6a4570e10ae52e543a5af9b2a3414c965
|
refs/heads/master
| 2023-08-16T21:36:54.426014
| 2023-08-15T14:11:08
| 2023-08-15T14:11:08
| 75,231,636
| 0
| 2
|
MIT
| 2018-02-08T14:25:53
| 2016-11-30T22:09:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
# $Id: BAMBUProd_AODSIM42.py,v 1.1 2011/10/09 14:15:32 bendavid Exp $
# CMSSW configuration: convert AODSIM events into the MIT "Bambu" ntuple
# format (process FILEFI).
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILEFI')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('Mit_025a'),
    annotation = cms.untracked.string('AODSIM'),
    name = cms.untracked.string('BambuProduction')
)
# Process every event in the input (-1 = no limit).
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
    Rethrow = cms.untracked.vstring('ProductNotFound'),
    fileMode = cms.untracked.string('NOMERGE'),
)
# input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:/data/blue/bendavid/423aodsim/884FBED5-AC91-E011-B64A-90E6BA442EEB.root'),
)
# Drop DQM and stale HLT trigger-map products from the input stream.
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*", "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'START44_V6::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences42.BambuFillAODSIM_cfi")
# Placeholder substituted with the real dataset name by the submission tool.
process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
process.bambu_step = cms.Path(process.BambuFillAODSIM)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
|
[
"paus@mit.edu"
] |
paus@mit.edu
|
f86623fcf6f7206be652b39fbbe524b18419d731
|
95447e499f9eb8bed0b11a93b5bb48c12657e9f6
|
/colorscheme.py
|
dfdd9dc830399b9f47bb1e5bb85e731559e04f53
|
[] |
no_license
|
trhartke/turtle-papyrus
|
8c18d6c0e30c5e6905142914deb813ece78b415c
|
ba908d5b76b838e70ff11e8a84fd0dd37b7e8081
|
refs/heads/master
| 2021-05-14T13:05:17.762162
| 2018-01-05T20:49:30
| 2018-01-05T20:49:30
| 116,427,713
| 0
| 0
| null | 2018-01-05T21:17:41
| 2018-01-05T21:17:41
| null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
#RGB format
def _rgb(r, g, b):
    """Scale 8-bit RGB components into the [0, 1] range."""
    return (r / 255., g / 255., b / 255.)


# Named palette colors, each an (r, g, b) tuple of floats in [0, 1].
blue = _rgb(6, 111, 167)
green = _rgb(66, 165, 74)
red = _rgb(234, 29, 34)
purple = _rgb(126, 77, 132)
orange = _rgb(238, 135, 38)
black = _rgb(2, 7, 5)
yellow = _rgb(254, 244, 49)
|
[
"eqm@mit.edu"
] |
eqm@mit.edu
|
4581ab414b51e7074ca5e0bd5d88db5635789b56
|
6f099b8912e3623c524a86900ec252fd69f48e91
|
/flbdeezer/flbdeezer/wsgi.py
|
313cc95dc9e9d747244825a008bdd5f38a039b5b
|
[] |
no_license
|
crypticwasp254/apiflbdeeze
|
8595f40940fbe63926c4e102249114226563a232
|
2de08dedb8dcfde953761a86d20536363b533729
|
refs/heads/master
| 2023-04-04T19:14:17.122822
| 2021-01-08T14:08:05
| 2021-01-08T14:08:05
| 327,902,659
| 2
| 1
| null | 2021-04-15T08:13:42
| 2021-01-08T12:53:57
|
Python
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for flbdeezer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'flbdeezer.settings')
application = get_wsgi_application()
|
[
"crypticwasp254@gmail.com"
] |
crypticwasp254@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.