blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20afedb1b001619332e9d7e143861e7ec13ba57a
|
45e97bd0c32042504052342bc1ae4e66a30d4d9a
|
/corepy/chapter13/demo5-trackInstance.py
|
34e5d0f1d27f10ca12c701e54330defcbeef7adc
|
[] |
no_license
|
vonzhou/py-learn
|
acf20c5183bff9788fcae9e36abdcd6f9bc553da
|
f0794164105dddbdffe082dfc90520f8778cbec3
|
refs/heads/master
| 2016-09-10T01:29:30.551541
| 2015-12-08T08:53:46
| 2015-12-08T08:53:46
| 27,669,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
''' P343'''
class InsCnt(object):
    """Track how many InsCnt instances are currently alive.

    The tally lives in a single class attribute; construction bumps it
    and (refcount-driven) destruction decrements it.
    """

    count = 0  # class attribute shared by every instance

    def __init__(self):
        """Creating an instance increments the shared counter."""
        InsCnt.count += 1

    def __del__(self):
        """Destroying the last reference decrements the shared counter."""
        InsCnt.count -= 1

    def howMany(self):
        """Return the number of instances alive right now."""
        return InsCnt.count
c1 = InsCnt()
print c1.howMany()
c2 = c1
print c2.howMany()
c3 = InsCnt()
print howMany()
del c1
del c2
print howMany()
del c3
print howMany()
raw_input()
raw_input()
|
[
"vonzhou@163.com"
] |
vonzhou@163.com
|
2c6c3a09d95945c7a9f020b9df2ee127ebe4414a
|
00e29479dc7c45a9e019f96c90a69a49af618ccf
|
/src/api-engine/src/api/routes/user/views.py
|
42300c55ed400a24f4c2f80abe33262434251b0a
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
yunchaozou/cello
|
8dd081db2ce5d9b8975d553d4491d329790588ef
|
68158f572c688f1710813c4df47fad28c3d4276c
|
refs/heads/master
| 2020-04-22T08:40:53.157301
| 2019-02-08T22:35:35
| 2019-02-08T22:35:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,549
|
py
|
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
from rest_framework import viewsets, status
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from drf_yasg.utils import swagger_auto_schema
from api.routes.network.serializers import NetworkListResponse
from api.utils.common import with_common_response
from api.routes.company.serializers import (
NodeOperationSerializer,
CompanyQuery,
CompanyCreateBody,
CompanyIDSerializer,
)
from api.auth import CustomAuthenticate
LOG = logging.getLogger(__name__)
class UserViewSet(viewsets.ViewSet):
    """User management API: list/create/destroy plus `attributes` and
    `password` sub-routes. All actions require authentication."""

    authentication_classes = (CustomAuthenticate,)
    permission_classes = (IsAuthenticated,)

    @swagger_auto_schema(
        query_serializer=CompanyQuery,
        # Fix: with_common_response was applied twice here (nesting the
        # common error responses inside themselves), unlike every other
        # action in this ViewSet; wrap exactly once.
        responses=with_common_response({status.HTTP_200_OK: NetworkListResponse}),
    )
    def list(self, request, *args, **kwargs):
        """
        List Users

        List user through query parameter
        """
        LOG.info("user %s", request.user.role)
        return Response(data=[], status=status.HTTP_200_OK)

    @swagger_auto_schema(
        request_body=CompanyCreateBody,
        responses=with_common_response(
            {status.HTTP_201_CREATED: CompanyIDSerializer}
        ),
    )
    def create(self, request):
        """
        Create User

        Create new user
        """
        # Not implemented yet; swagger schema documents the intended contract.
        pass

    @swagger_auto_schema(
        responses=with_common_response(
            {status.HTTP_204_NO_CONTENT: "No Content"}
        )
    )
    def destroy(self, request, pk=None):
        """
        Delete User

        Delete user
        """
        pass

    @action(
        methods=["get", "post", "put", "delete"],
        detail=True,
        url_path="attributes",
    )
    def attributes(self, request, pk=None):
        """
        get:
        Get User Attributes

        Get attributes of user

        post:
        Create Attributes

        Create attribute for user

        put:
        Update Attribute

        Update attribute of user

        delete:
        Delete Attribute

        Delete attribute of user
        """
        pass

    @swagger_auto_schema(method="post", responses=with_common_response())
    @action(methods=["post"], detail=True, url_path="password")
    def password(self, request, pk=None):
        """
        post:
        Update/Reset Password

        Update/Reset password for user
        """
        pass
|
[
"hightall@me.com"
] |
hightall@me.com
|
0f638cb37c3cfe9526b6699975d791d2e7f0eaff
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3_neat/16_0_3_yesdongil_main_big.py
|
207b07db4c346d26f784da03b2499dd769587875
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
import os
import sys
import math
import itertools
def xrange(start, stop):
    """Yield the integers from start up to, but not including, stop."""
    current = start
    while current < stop:
        yield current
        current += 1
def is_prime(value):
    """Return a small divisor of ``value`` if one is found, else 0.

    Despite the name, this does NOT return a boolean: a return of 0
    means "no divisor found" (the caller treats that as prime); a
    non-zero return is the divisor. Candidates tried are 2, 3, then
    numbers of the form 6k-1 / 6k+1, capped at 10000.

    NOTE(review): ``limit/6`` relies on Python 2 integer division — the
    rest of this file uses Python 2 syntax (print statement,
    raw_input), so that is assumed here; under Python 3 it would be a
    float. TODO confirm the target interpreter.
    """
    ret = 0
    if (value % 2) == 0:
        ret = 2
    elif (value % 3) == 0:
        ret = 3
    else:
        limit = int(math.sqrt(value))  # no divisor can exceed sqrt(value)
        index_limit = limit/6 + 1  # number of 6k+-1 candidates to try (py2 int division)
        for i in xrange(1, index_limit):
            prime_v = 6*i - 1
            if (value % prime_v) == 0:
                ret = prime_v
                break
            prime_v = 6*i + 1
            if (value % prime_v) == 0:
                ret = prime_v
                break
            if (prime_v > 10000):  # hard cap on the divisor search
                break
    return ret
def make_value(N, middle, base):
    """Interpret an N-digit "jamcoin" pattern in the given base.

    The digit string always starts and ends with 1; ``middle`` encodes
    the N-2 inner binary digits (bit k of ``middle`` is the digit at
    place value base**(k+1)).

    Fix: ``middle /= 2`` relied on Python 2 integer division; under
    Python 3 it produced a float and broke the bit loop. Floor division
    (``//=``) is identical on Python 2 ints and correct on Python 3.
    """
    result = 1 + base**(N-1)  # leading and trailing 1 digits
    mul = base  # place value of the current inner digit
    while (middle > 0):
        remainder = middle % 2
        if (remainder == 1):
            result += mul
        mul = mul*base
        middle //= 2
    return result
def get_result(N, J):
    """Find J "jamcoins" of length N (CodeJam 2016 qualification C).

    A jamcoin here is an N-digit binary pattern (first and last digit 1)
    whose interpretation in every base 2..10 has a divisor found by
    ``is_prime`` (which returns 0 when none is found). Each returned
    entry is [base-10 interpretation, divisor for base 2, ...,
    divisor for base 10].
    """
    ret = []
    result = []
    limit = 2**(N-2)  # enumerate all possible middle-bit patterns
    prime_ret = 0
    list_count = 0
    for i in range(0, limit):
        divisor_list = []
        for base in range(2, 11):
            test_v = make_value(N, i, base)
            prime_ret = is_prime(test_v)
            if (prime_ret == 0):  # no divisor found in some base: reject candidate
                break
            else:
                divisor_list.append(prime_ret)
        if (prime_ret > 0):  # a divisor was found in every base: jamcoin accepted
            result.append(make_value(N, i, 10))
            result.extend(divisor_list)
            ret.append(result)
            result = []
            list_count += 1
            if (list_count == J):  # collected the requested number of jamcoins
                break
    return ret
def Main():
    """Read N and J from stdin and print J jamcoins with their divisors.

    Input format: first line is a case count (read but only one case is
    handled — the output header is hard-coded to "Case #1:"), second
    line is "N J".

    NOTE(review): Python 2 script (print statement, raw_input).
    """
    result_list = []
    arg = []
    CASE_N = int(raw_input())  # case count; intentionally unused beyond parsing
    line = raw_input()
    arg = line.split()
    result_list = get_result(int(arg[0]), int(arg[1]))
    print 'Case #1:'
    for result in result_list:
        for result_one in result:
            sys.stdout.write(str(result_one) + ' ')
        sys.stdout.write('\n')
if __name__ == '__main__':
    # Main() returns None, so the process exit status is always 0.
    sys.exit(Main())
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
edeee6602b190ca18e61ad5f160a964b27d00952
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_256/ch34_2020_04_23_17_53_27_919517.py
|
ee3644d60592c2aae2ec473d6ba51ecadb41d2fe
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
def eh_primo(n):
    """Return True when n is prime, False otherwise.

    Fixes over the original:
    - the original returned True on the FIRST candidate that did not
      divide n (so e.g. 25 was reported prime);
    - n == 3 fell through the empty loop and returned None;
    - the evenness test sat inside the odd-step loop instead of being
      checked once up front.
    Trial division only needs to run up to sqrt(n).
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    for i in range(3, int(n ** 0.5) + 1, 2):
        if n % i == 0:
            return False
    return True
def maior_primo_menor_que(n):
    """Return the largest prime <= n (n itself qualifies), or -1 if
    there is none (n < 2).

    Fixes over the original:
    - `elif:` with no condition was a SyntaxError;
    - `p == n` compared instead of assigning, so no prime was ever
      recorded and n was only decremented on the (broken) elif branch,
      making the loop endless.
    """
    p = -1
    while n > 0:
        if n == 2:
            return 2
        if eh_primo(n):
            return n
        n -= 1
    return p
|
[
"you@example.com"
] |
you@example.com
|
fafe549264abbc0749503ca3d08dbbc62fe4299b
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/lambda_write_f/function-event-invoke-config_delete.py
|
15016e42d8d9604bacece336bdaec2f86610b9a1
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
# Emits the CLI parameter template for
# `aws lambda delete-function-event-invoke-config` via the shared helper.
if __name__ == '__main__':
    """
    get-function-event-invoke-config : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lambda/get-function-event-invoke-config.html
    list-function-event-invoke-configs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lambda/list-function-event-invoke-configs.html
    put-function-event-invoke-config : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lambda/put-function-event-invoke-config.html
    update-function-event-invoke-config : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lambda/update-function-event-invoke-config.html
    """
    write_parameter("lambda", "delete-function-event-invoke-config")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
4e42e313b4e8f4517cca59865a67badc6b525b39
|
200df6cda6e54d56a4c800e10e6d5f248d7d59f2
|
/02-算法思想/广度优先搜索/778.水位上升的泳池中游泳(H).py
|
0d5613ed1b63a75e2a20984da04124b0b0f7e70b
|
[] |
no_license
|
jh-lau/leetcode_in_python
|
b9b9a47d0b3ce29c3c56836b39decc3ec4487777
|
1d1876620a55ff88af7bc390cf1a4fd4350d8d16
|
refs/heads/master
| 2023-04-17T15:01:49.925774
| 2021-04-24T01:17:39
| 2021-04-24T01:17:39
| 192,735,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,600
|
py
|
"""
@Author : liujianhan
@Date : 20/9/26 19:31
@Project : leetcode_in_python
@FileName : 778.水位上升的泳池中游泳(H).py
@Description : 在一个 N x N 的坐标方格 grid 中,每一个方格的值 grid[i][j] 表示在位置 (i,j) 的平台高度。
现在开始下雨了。当时间为 t 时,此时雨水导致水池中任意位置的水位为 t 。你可以从一个平台游向四周相邻的任意一个平台,
但是前提是此时水位必须同时淹没这两个平台。假定你可以瞬间移动无限距离,也就是默认在方格内部游动是不耗时的。
当然,在你游泳的时候你必须待在坐标方格里面。
你从坐标方格的左上平台 (0,0) 出发。最少耗时多久你才能到达坐标方格的右下平台 (N-1, N-1)?
示例 1:
输入: [[0,2],[1,3]]
输出: 3
解释:
时间为0时,你位于坐标方格的位置为 (0, 0)。
此时你不能游向任意方向,因为四个相邻方向平台的高度都大于当前时间为 0 时的水位。
等时间到达 3 时,你才可以游向平台 (1, 1). 因为此时的水位是 3,坐标方格中的平台没有比水位 3 更高的,所以你可以游向坐标方格中的任意位置
示例2:
输入: [[0,1,2,3,4],[24,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]
输出: 16
解释:
0 1 2 3 4
24 23 22 21 5
12 13 14 15 16
11 17 18 19 20
10 9 8 7 6
最终的路线用加粗进行了标记。
我们必须等到时间为 16,此时才能保证平台 (0, 0) 和 (4, 4) 是连通的
提示:
2 <= N <= 50.
grid[i][j] 位于区间 [0, ..., N*N - 1] 内。
"""
import bisect
import sys
from typing import List
class Solution:
    """Four solutions to LeetCode 778 "Swim in Rising Water"."""

    # 228ms, 14MB
    @staticmethod
    def swim_in_water(grid: List[List[int]]) -> int:
        """
        Union-find (disjoint sets).
        @param grid: N x N platform heights
        @return: earliest time t at which (0,0) and (N-1,N-1) are connected
        """
        n = len(grid)
        p = [[(i, j) for j in range(n)] for i in range(n)]  # parent table: every cell starts as its own root
        h = sorted([[grid[i][j], i, j] for j in range(n) for i in range(n)])  # all cells sorted by height

        def f(a, b):
            # find with path compression
            if (a, b) != p[a][b]:
                p[a][b] = f(*p[a][b])  # parents are (row, col) tuples, so unpack with *
            return p[a][b]

        k = 0
        for t in range(max(grid[0][0], grid[-1][-1]), h[-1][0]):  # from the larger corner height up to the global max
            while h[k][0] <= t:
                _, i, j = h[k]
                for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                    if 0 <= x < n and 0 <= y < n:
                        if grid[i][j] <= t and grid[x][y] <= t:
                            (pi, pj), (px, py) = f(i, j), f(x, y)
                            if (pi, pj) != (px, py):  # union flooded neighbours from different sets
                                p[px][py] = (pi, pj)
                k += 1
            if f(0, 0) == f(n - 1, n - 1):  # done once the two corners share a root
                return t
        return h[-1][0]

    # 172ms, 13.8MB
    @staticmethod
    def swim_in_water_v2(grid: List[List[int]]) -> int:
        """
        BFS flood fill, one full expansion per time step.
        @param grid:
        @return:
        """
        n = len(grid)
        c = {(0, 0)}  # visited cells
        for t in range(max(grid[0][0], grid[-1][-1]), sys.maxsize):  # timing starts at the larger of the two corner heights
            p = c.copy()  # frontier: each time step restarts from everything already reached
            while p:
                q = set()  # next BFS frontier
                for i, j in p:
                    if i == j == n - 1:  # reached the target: current time is the answer
                        return t
                    for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                        if 0 <= x < n and 0 <= y < n and grid[x][y] <= t and (x, y) not in c:  # in bounds, flooded, unvisited
                            q |= {(x, y)}
                            c |= {(x, y)}
                p = q

    # 128ms, 13.8MB
    @staticmethod
    def swim_in_water_v3(grid: List[List[int]]) -> int:
        """
        Best-first search with a height-sorted (ascending) queue.
        @param grid:
        @return:
        """
        n = len(grid)
        b = {(0, 0)}  # visited cells
        p = [[grid[0][0], 0, 0]]  # ascending queue seeded with the start cell
        t = 0  # highest height crossed so far
        while True:
            h, i, j = p.pop(0)
            t = max(t, h)
            if i == j == n - 1:  # reached the goal: t is the answer
                return t
            for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                if 0 <= x < n and 0 <= y < n and (x, y) not in b:
                    bisect.insort(p, [grid[x][y], x, y])  # binary insertion keeps the queue sorted by height
                    b |= {(x, y)}

    # 140ms, 13.7MB
    @staticmethod
    def swim_in_water_v4(grid: List[List[int]]) -> int:
        """
        Bidirectional best-first search with two sorted queues.
        @param grid:
        @return:
        """
        n = len(grid)
        b, e = {(0, 0)}, {(n - 1, n - 1)}  # visited sets of the forward and backward searches
        p, q = [[grid[0][0], 0, 0]], [[grid[-1][-1], n - 1, n - 1]]  # the two ascending queues
        t = 0  # highest height crossed so far
        while True:
            h, i, j = p.pop(0)
            t = max(t, h)
            if (i, j) in e:  # the popped cell was already reached from the other side: done
                return t
            for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                if 0 <= x < n and 0 <= y < n and (x, y) not in b:
                    bisect.insort(p, [grid[x][y], x, y])
                    b |= {(x, y)}
            h, i, j = q.pop(0)  # symmetric expansion from the far corner (p,q and b,e swapped)
            t = max(t, h)
            if (i, j) in b:
                return t
            for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                if 0 <= x < n and 0 <= y < n and (x, y) not in e:
                    bisect.insort(q, [grid[x][y], x, y])
                    e |= {(x, y)}
if __name__ == '__main__':
    # Run every solver on both sample grids; expected answers are 3 and 16.
    cases = [
        [[0, 2], [1, 3]],
        [[0, 1, 2, 3, 4], [24, 23, 22, 21, 5], [12, 13, 14, 15, 16], [11, 17, 18, 19, 20], [10, 9, 8, 7, 6]],
    ]
    solvers = (
        Solution.swim_in_water,
        Solution.swim_in_water_v2,
        Solution.swim_in_water_v3,
        Solution.swim_in_water_v4,
    )
    for case in cases:
        for solve in solvers:
            print(solve(case))
|
[
"lorgerd@163.com"
] |
lorgerd@163.com
|
793c15be2778bfa6a0852f657ea403fc51e685ba
|
a3f793a53361d08f3e0cdedc7fab9df40e201eef
|
/main.py
|
a53882b59400172fbcb656c830535363798e384d
|
[] |
no_license
|
songshanshi/imoocc_py3
|
156db4f072bc956f45cbcc8c61fca964be8acfb9
|
6f3491ce857c541bf55d5ed8993265b7dd4dee09
|
refs/heads/master
| 2020-04-28T02:25:18.241155
| 2018-10-16T07:20:15
| 2018-10-16T07:20:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,917
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#########################################################################
# Author:Jeson
# Email:jeson@imoocc.com
import datetime
import os
import re
import yaml
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
# import sys
os.environ["DJANGO_SETTINGS_MODULE"] = 'admin.settings.local_cj'
import django
import time
django.setup()
from scanhosts.models import HostLoginifo
from scanhosts.util.nmap_all_server import NmapNet
from scanhosts.util.nmap_all_server import NmapDocker
from scanhosts.util.nmap_all_server import NmapKVM
from scanhosts.util.nmap_all_server import NmapVMX
from scanhosts.util.nmap_all_server import snmp_begin
from scanhosts.util.j_filter import FilterRules
from scanhosts.util.get_pv_relation import GetHostType
from detail.models import PhysicalServerInfo,ConnectionInfo,OtherMachineInfo,StatisticsRecord
from operations.models import MachineOperationsInfo
from scanhosts.util.nmap_all_server import NetDevLogin
from admin.settings.local_cj import BASE_DIR
import logging
logger = logging.getLogger("django")
from apps.detail.utils.machines import Machines
# def net_begin():
# '''
# 开始执行网络扫描
# :return:
# '''
# nm = NmapNet(oid='1.3.6.1.2.1.1.5.0',Version=2)
# nm_res = nm.query()
# print "...................",nm_res
def main():
    '''
    Read the scan configuration and run the full discovery pipeline:
    host scan, network-device classification, host/VM relation mapping,
    and statistics/status-table updates.
    :return:
    '''
    # NOTE(review): yaml.load without a Loader is unsafe on untrusted
    # input and deprecated in PyYAML 5.x — prefer yaml.safe_load.
    s_conf = yaml.load(open('conf/scanhosts.yaml'))
    s_nets = s_conf['hostsinfo']['nets']
    s_ports = s_conf['hostsinfo']['ports']
    s_pass = s_conf['hostsinfo']['ssh_pass']
    s_cmds = s_conf['hostsinfo']['syscmd_list']
    s_keys = s_conf['hostsinfo']['ssh_key_file']
    s_blacks = s_conf['hostsinfo']['black_list']
    s_emails = s_conf['hostsinfo']['email_list']
    n_sysname_oid = s_conf['netinfo']['sysname_oid']
    n_sn_oid = s_conf['netinfo']['sn_oids']
    n_commu = s_conf['netinfo']['community']
    n_login_sw = s_conf['netinfo']['login_enable']
    n_backup_sw = s_conf['netinfo']['backup_enable']
    n_backup_sever = s_conf['netinfo']['tfp_server']
    d_pass = s_conf['dockerinfo']['ssh_pass']
    starttime = datetime.datetime.now()
    # --- Scan host information ---
    # NOTE(review): the loop rebinds unkown_list/key_not_login_list each
    # iteration, so only the results of the LAST net in s_nets survive.
    '''
    扫描主机信息
    '''
    for nmap_type in s_nets:
        unkown_list,key_not_login_list = snmp_begin(nmap_type,s_ports,s_pass,s_keys,s_cmds,s_blacks,s_emails)
    # --- Classify unreachable hosts as network devices via SNMP ---
    '''
    扫描网络信息
    '''
    nm = NmapNet(n_sysname_oid,n_sn_oid,n_commu)
    if key_not_login_list:
        # Hosts with a live SSH port we could not log in to.
        for item in key_not_login_list:
            is_net = nm.query(item)
            if is_net[0] or is_net[1]:
                # SNMP answered: record as a network device.
                HostLoginifo.objects.update_or_create(ip=item,hostname=is_net[0],sn=is_net[1],mathine_type="Network device")
            else:
                # Unknown machine: mark SSH unreachable and file it under "other".
                HostLoginifo.objects.update_or_create(ip=item,ssh_port=key_not_login_list[item][0],ssh_status=0)
                other_sn = item.replace('.','')  # synthetic SN key derived from the IP
                ob = OtherMachineInfo.objects.filter(sn_key=other_sn)
                if not ob:
                    print(".........................OtherMachineInfo",item,other_sn)
                    OtherMachineInfo.objects.create(ip=item,sn_key=other_sn,reson_str=u"SSH端口存活,无法登录",oth_cab_id=1)
    if unkown_list:
        # Live IPs that did not look like Linux servers at all.
        for item in unkown_list:
            is_net = nm.query(item)
            if is_net[0] or is_net[1]:
                HostLoginifo.objects.update_or_create(ip=item,hostname=is_net,mathine_type="Network device")
            else:
                HostLoginifo.objects.update_or_create(ip=item,ssh_status=0)
                other_sn = item.replace('.','')
                ob = OtherMachineInfo.objects.filter(sn_key=other_sn)
                if not ob:
                    OtherMachineInfo.objects.create(ip=item,sn_key=other_sn,reson_str=u"IP存活,非Linux服务器",oth_cab_id=1)
    # Disabled feature: network-device login / config backup.
    # '''
    # Network device backup / login feature
    # '''
    # net_login_dct = {}
    # with open("%s/conf/net_dev.pass"%BASE_DIR,'r') as f:
    #     for item in f.readlines():
    #         ip,username,passwd,en_passwd = re.split("\s+",item)[:4]
    #         net_login_dct[ip] = (username,passwd,en_passwd)
    # if n_login_sw == "True":
    #     res = NetDevLogin(dev_ips=net_login_dct,backup_sw=n_backup_sw,back_server=n_backup_sever)
    # --- De-duplicate host info and build the SN-key -> IP mapping ---
    '''
    规则:主机信息,去重、生成关系字典
    '''
    ft = FilterRules()
    key_ip_dic = ft.run()
    # --- Work out which hosts run which hypervisor/container stack ---
    '''
    梳理虚拟服务器主机于服务器信息
    '''
    pv = GetHostType()
    p_relate_dic = pv.get_host_type(key_ip_dic)
    # --- Update virtualization type on each physical server record ---
    '''
    更新宿主机类型中表对应关系
    '''
    ip_key_dic = {v:k for k,v in key_ip_dic.items()}
    docker_p_list = p_relate_dic["docker-containerd"]
    kvm_p_list = p_relate_dic["qemu-system-x86_64"]
    vmware_p_list = p_relate_dic["vmx"]
    for item in docker_p_list:
        PhysicalServerInfo.objects.filter(conn_phy__sn_key=ip_key_dic[item]).update(vir_type="1")
    for item in kvm_p_list:
        PhysicalServerInfo.objects.filter(conn_phy__sn_key=ip_key_dic[item]).update(vir_type="0")
    for item in vmware_p_list:
        PhysicalServerInfo.objects.filter(conn_phy__sn_key=ip_key_dic[item]).update(vir_type="2")
    # --- Map Docker hosts to their containers ---
    '''
    扫描docker的宿主机和虚拟服务的关系
    '''
    ds = NmapDocker(s_cmds,d_pass,ip_key_dic)
    ds.do_nmap(docker_p_list)
    # --- Map KVM hosts to their guests ---
    '''
    扫描KVM的宿主机和虚拟服务的关系
    # '''
    ks = NmapKVM(ip_key_dic)
    ks.do_nmap(kvm_p_list)
    # --- Collect ESXi guest configuration over SNMP ---
    '''
    扫描ESXI虚拟机配置
    '''
    ne = NmapVMX(vmware_p_list,ip_key_dic)
    ne.dosnmp()
    # --- Ensure every known SN has an operations/status record ---
    '''
    更新状态表,用户信息表
    '''
    c_sn_lst = [item.sn_key for item in ConnectionInfo.objects.all()]
    o_sn_lst = [item.sn_key for item in OtherMachineInfo.objects.all()]
    old_sn_list = [item.sn_key for item in MachineOperationsInfo.objects.all()]
    new_sn_lst = c_sn_lst + o_sn_lst
    diff_sn_lst = set(new_sn_lst + old_sn_list)
    for item in diff_sn_lst:
        try:
            nsin = MachineOperationsInfo.objects.filter(sn_key=item)
            if not nsin:
                MachineOperationsInfo.objects.create(sn_key=item)
        except Exception as e:
            print("Error:SN:%s not insert into database,reason is:%s"%(item,e))
            logger.error("Error:SN:%s not insert into database,reason is:%s"%(item,e))
    # --- Record aggregate counts for this run ---
    '''
    统计总数
    '''
    info_dic = Machines().get_all_count()
    StatisticsRecord.objects.create(all_count=info_dic['all_c'],pyh_count=info_dic['pyh_c'],net_count=info_dic['net_c'],
                                    other_count=info_dic['other_c'],vmx_count=info_dic['vmx_c'],kvm_count=info_dic['kvm_c'],docker_count=info_dic['docker_c'])
    endtime = datetime.datetime.now()
    totaltime = (endtime - starttime).seconds
    logger.info("{Finish:Use time %s s}"%totaltime)
    print("{Finish:Use time %s s}"%totaltime)
if __name__ == "__main__":
    # Entry point: run the full scan/update pipeline once.
    main()
|
[
"gengming8859@icloud.com"
] |
gengming8859@icloud.com
|
2bb1e7e593dfb67298aa570a9c0e2c150b0dc54b
|
d0bd9c3c5539141c74e0eeae2fa6b7b38af84ce2
|
/src/cogent3/parse/__init__.py
|
7559bc6dcc006e4be1bcd02096d3c56f55fc2512
|
[
"BSD-3-Clause"
] |
permissive
|
KaneWh1te/cogent3
|
150c72e2f80a6439de0413b39c4c37c09c9966e3
|
115e9eb5700627fdb24be61441a7e3e155c02c61
|
refs/heads/master
| 2023-07-29T00:32:03.742351
| 2021-04-20T04:32:00
| 2021-04-20T04:32:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
#!/usr/bin/env python
# Public submodules of cogent3.parse — one parser module per supported
# file format.
__all__ = [
    "blast",
    "cigar",
    "clustal",
    "dialign",
    "ebi",
    "fasta",
    "gcg",
    "genbank",
    "gff",
    "locuslink",
    "ncbi_taxonomy",
    "newick",
    "nexus",
    "paml",
    "paml_matrix",
    "phylip",
    "rdb",
    "record",
    "record_finder",
    "sequence",
    "table",
    "tinyseq",
    "tree",
    "tree_xml",
    "unigene",
]

# Package metadata.
__author__ = ""
__copyright__ = "Copyright 2007-2021, The Cogent Project"
__credits__ = [
    "Gavin Huttley",
    "Peter Maxwell",
    "Rob Knight",
    "Catherine Lozupone",
    "Jeremy Widmann",
    "Matthew Wakefield",
    "Sandra Smit",
    "Greg Caporaso",
    "Zongzhi Liu",
    "Micah Hamady",
    "Jason Carnes",
    "Raymond Sammut",
    "Hua Ying",
    "Andrew Butterfield",
    "Marcin Cieslik",
]
__license__ = "BSD-3"
__version__ = "2021.04.20a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Production"
|
[
"Gavin.Huttley@anu.edu.au"
] |
Gavin.Huttley@anu.edu.au
|
41a00bab3f061077909f54d74dc574355af1929d
|
1b77eaf078321b1320d72aa36a4357568101e4ca
|
/江南大学教务处/test.py
|
93ac06b18e5699d2285b3f417e63ee409aaa3bec
|
[] |
no_license
|
BEE-JN/python_homework
|
92ffc1216a380d124901fd64cc541f70813847dc
|
8ba4ea79cbd422f40e6f9f1cc5fed4d75715d207
|
refs/heads/master
| 2020-03-23T08:02:47.863607
| 2018-07-17T15:30:21
| 2018-07-17T15:30:21
| 141,305,118
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
import requests
import time
# Endpoint that gets polled once per second.
url = "https://survey.zkeycc/pku/xsdc/?dm=bk"

if __name__ == '__main__':
    # Poll the survey endpoint forever, dumping each raw response body.
    while True:
        response = requests.get(url)
        print(response.content)
        time.sleep(1)
|
[
"41156190+GCS-CN@users.noreply.github.com"
] |
41156190+GCS-CN@users.noreply.github.com
|
96c1f73d17d18f7906615ca48bc2e2d25d8b7259
|
caa06eca3eef2549d5088f6487201f734b35822e
|
/multi_ie/EE/model/multi_pointer_net.py
|
0d2334ed6d7a09e87757e36528cedd3c228713c5
|
[] |
no_license
|
kelvincjr/shared
|
f947353d13e27530ba44ea664e27de51db71a5b6
|
4bc4a12b0ab44c6847a67cbd7639ce3c025f38f8
|
refs/heads/master
| 2023-06-23T19:38:14.801083
| 2022-05-17T09:45:22
| 2022-05-17T09:45:22
| 141,774,490
| 6
| 1
| null | 2023-06-12T21:30:07
| 2018-07-21T02:22:34
|
Python
|
UTF-8
|
Python
| false
| false
| 7,694
|
py
|
# _*_ coding:utf-8 _*_
import warnings
import numpy as np
import torch
import torch.nn as nn
from transformers import BertModel
from transformers import BertPreTrainedModel
from .layernorm import ConditionalLayerNorm
#from utils.data_util import batch_gather
warnings.filterwarnings("ignore")
def batch_gather(data: torch.Tensor, index: torch.Tensor):
    """For each batch row i, select data[i, index[i], :].

    Round-trips through NumPy on the CPU, then moves the stacked result
    back to data's device.
    """
    batch = index.shape[0]
    index_np = index.cpu().numpy()
    data_np = data.cpu().data.numpy()
    gathered = [data_np[i, index_np[i], :] for i in range(batch)]
    return torch.from_numpy(np.array(gathered)).to(data.device)
class ERENet(nn.Module):
    """
    ERENet : entity relation jointed extraction

    A subject pointer head over BERT encodings predicts subject
    start/end logits; an object/predicate pointer head, conditioned on
    a chosen subject span through conditional layer norm, predicts
    start/end logits per predicate class.
    """

    def __init__(self, encoder, classes_num):
        # encoder: a transformers BERT-style model; its config supplies
        #   hidden_size and layer_norm_eps.
        # classes_num: number of predicate classes for the p-o head.
        super().__init__()
        self.classes_num = classes_num
        # BERT model
        self.bert = encoder
        config = encoder.config
        # Embedding over {0,1} subject-indicator tokens (0 is padding);
        # currently only referenced by commented-out training code below.
        self.token_entity_emb = nn.Embedding(num_embeddings=2, embedding_dim=config.hidden_size,
                                             padding_idx=0)
        # self.encoder_layer = TransformerEncoderLayer(config.hidden_size, nhead=4)
        # self.transformer_encoder = TransformerEncoder(self.encoder_layer, num_layers=1)
        # Layer norm conditioned on the subject-span representation.
        self.LayerNorm = ConditionalLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # pointer network heads: object/predicate (start+end per class) and subject (start+end)
        self.po_dense = nn.Linear(config.hidden_size, self.classes_num * 2)
        self.subject_dense = nn.Linear(config.hidden_size, 2)
        self.loss_fct = nn.BCEWithLogitsLoss(reduction='none')
        #self.init_weights()

    def forward(self, q_ids=None, passage_ids=None, segment_ids=None, attention_mask=None, subject_ids=None,
                subject_labels=None,
                object_labels=None, eval_file=None,
                is_eval=False):
        """Training (is_eval=False): return the combined subject +
        object/predicate BCE loss, masked to non-padding tokens.
        Eval (is_eval=True): decode subject spans from the subject head
        and return (qids, subject_ids, po_tensor) — one entry per
        decoded subject span.

        NOTE(review): attention_mask is accepted but unused; the mask is
        recomputed from passage_ids != 0.
        """
        mask = (passage_ids != 0).float()
        bert_encoder = self.bert(passage_ids, token_type_ids=segment_ids, attention_mask=mask)[0]
        if not is_eval:
            # subject_encoder = self.token_entity_emb(token_type_ids)
            # context_encoder = bert_encoder + subject_encoder
            # Build the subject representation from its gold start/end tokens.
            sub_start_encoder = batch_gather(bert_encoder, subject_ids[:, 0])
            sub_end_encoder = batch_gather(bert_encoder, subject_ids[:, 1])
            subject = torch.cat([sub_start_encoder, sub_end_encoder], 1)
            # Condition the sequence encoding on the subject span.
            context_encoder = self.LayerNorm(bert_encoder, subject)
            sub_preds = self.subject_dense(bert_encoder)
            po_preds = self.po_dense(context_encoder).reshape(passage_ids.size(0), -1, self.classes_num, 2)
            subject_loss = self.loss_fct(sub_preds, subject_labels)
            # subject_loss = F.binary_cross_entropy(F.sigmoid(sub_preds) ** 2, subject_labels, reduction='none')
            subject_loss = subject_loss.mean(2)
            # Average only over non-padding positions.
            subject_loss = torch.sum(subject_loss * mask.float()) / torch.sum(mask.float())
            po_loss = self.loss_fct(po_preds, object_labels)
            # po_loss = F.binary_cross_entropy(F.sigmoid(po_preds) ** 4, object_labels, reduction='none')
            po_loss = torch.sum(po_loss.mean(3), 2)
            po_loss = torch.sum(po_loss * mask.float()) / torch.sum(mask.float())
            loss = subject_loss + po_loss
            return loss
        else:
            subject_preds = nn.Sigmoid()(self.subject_dense(bert_encoder))
            answer_list = list()
            # Decode candidate subject spans per query: starts above 0.6,
            # matched with the nearest end at/after them above 0.5.
            for qid, sub_pred in zip(q_ids.cpu().numpy(),
                                     subject_preds.cpu().numpy()):
                context = eval_file[qid].bert_tokens
                start = np.where(sub_pred[:, 0] > 0.6)[0]
                end = np.where(sub_pred[:, 1] > 0.5)[0]
                subjects = []
                for i in start:
                    j = end[end >= i]
                    # Skip positions at the CLS token or beyond the passage.
                    if i == 0 or i > len(context) - 2:
                        continue
                    if len(j) > 0:
                        j = j[0]
                        if j > len(context) - 2:
                            continue
                        subjects.append((i, j))
                answer_list.append(subjects)
            # Expand per-query tensors so there is one row per decoded span.
            qid_ids, bert_encoders, pass_ids, subject_ids, token_type_ids = [], [], [], [], []
            for i, subjects in enumerate(answer_list):
                if subjects:
                    qid = q_ids[i].unsqueeze(0).expand(len(subjects))
                    pass_tensor = passage_ids[i, :].unsqueeze(0).expand(len(subjects), passage_ids.size(1))
                    new_bert_encoder = bert_encoder[i, :, :].unsqueeze(0).expand(len(subjects), bert_encoder.size(1),
                                                                                 bert_encoder.size(2))
                    token_type_id = torch.zeros((len(subjects), passage_ids.size(1)), dtype=torch.long)
                    for index, (start, end) in enumerate(subjects):
                        token_type_id[index, start:end + 1] = 1
                    qid_ids.append(qid)
                    pass_ids.append(pass_tensor)
                    subject_ids.append(torch.tensor(subjects, dtype=torch.long))
                    bert_encoders.append(new_bert_encoder)
                    token_type_ids.append(token_type_id)
            # No subject decoded anywhere: return sentinel tensors (qid -1).
            if len(qid_ids) == 0:
                subject_ids = torch.zeros(1, 2).long().to(bert_encoder.device)
                qid_tensor = torch.tensor([-1], dtype=torch.long).to(bert_encoder.device)
                po_tensor = torch.zeros(1, bert_encoder.size(1)).long().to(bert_encoder.device)
                return qid_tensor, subject_ids, po_tensor
            qids = torch.cat(qid_ids).to(bert_encoder.device)
            pass_ids = torch.cat(pass_ids).to(bert_encoder.device)
            bert_encoders = torch.cat(bert_encoders).to(bert_encoder.device)
            # token_type_ids = torch.cat(token_type_ids).to(bert_encoder.device)
            subject_ids = torch.cat(subject_ids).to(bert_encoder.device)
            flag = False
            # Run the p-o head in chunks of up to 1024 spans to bound memory.
            split_heads = 1024
            bert_encoders_ = torch.split(bert_encoders, split_heads, dim=0)
            pass_ids_ = torch.split(pass_ids, split_heads, dim=0)
            # token_type_ids_ = torch.split(token_type_ids, split_heads, dim=0)
            subject_encoder_ = torch.split(subject_ids, split_heads, dim=0)
            po_preds = list()
            for i in range(len(bert_encoders_)):
                bert_encoders = bert_encoders_[i]
                # token_type_ids = token_type_ids_[i]
                pass_ids = pass_ids_[i]
                subject_encoder = subject_encoder_[i]
                if bert_encoders.size(0) == 1:
                    # Size-1 chunk: duplicate the row, then keep only the
                    # second copy's prediction below (see `flag`).
                    flag = True
                    # print('flag = True**********')
                    bert_encoders = bert_encoders.expand(2, bert_encoders.size(1), bert_encoders.size(2))
                    subject_encoder = subject_encoder.expand(2, subject_encoder.size(1))
                    # pass_ids = pass_ids.expand(2, pass_ids.size(1))
                sub_start_encoder = batch_gather(bert_encoders, subject_encoder[:, 0])
                sub_end_encoder = batch_gather(bert_encoders, subject_encoder[:, 1])
                subject = torch.cat([sub_start_encoder, sub_end_encoder], 1)
                context_encoder = self.LayerNorm(bert_encoders, subject)
                po_pred = self.po_dense(context_encoder).reshape(subject_encoder.size(0), -1, self.classes_num, 2)
                if flag:
                    po_pred = po_pred[1, :, :, :].unsqueeze(0)
                po_preds.append(po_pred)
            po_tensor = torch.cat(po_preds).to(qids.device)
            po_tensor = nn.Sigmoid()(po_tensor)
            return qids, subject_ids, po_tensor
|
[
"deco_2004@163.com"
] |
deco_2004@163.com
|
b865fa83e9b8e72b08e144110aa75b200cf807d4
|
7ec04fc867d0a48fffc05c65bff9217cfe211fe7
|
/HW/统计字符串/countHotal.py
|
31f95d4500041b28fa209a1a7c4dae778ad024c5
|
[] |
no_license
|
Cherry93/pythonPractic
|
3b9d1f99803503073bbb2f3a58009665338bd278
|
2889183af6c9a01ab47895b23e2d6ce8c288fd4d
|
refs/heads/master
| 2021-08-31T16:41:56.655989
| 2017-12-22T03:53:18
| 2017-12-22T03:53:18
| 115,008,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
'''
统计加州旅馆中所有单词出现的次数,并降序打印
'''
import collections
# Count word occurrences in the given text file (case-insensitive) and
# print them in descending frequency via collections.Counter.
filename = input("Enter a filename:")
with open(filename, 'r') as fpr:
    content = fpr.read()
content = content.replace("\n", '')
content1 = content.split()
print(content1)
# NOTE(review): crashes on an empty file (content1[0]), as the original did.
print(content1[0].lower())
print(len(content1))
# Fix: the original accumulated into a variable named `list`, shadowing
# the builtin, via a C-style index loop; use a comprehension instead.
words = [word.lower() for word in content1]
print(words)
print("\n各单词出现的个数:\n%s" % collections.Counter(words))
|
[
"358544104@qq.com"
] |
358544104@qq.com
|
c7eee2a22a1efb7ddb4b5278189a0424acae6d63
|
14ebcf98e7c64505839e0b7bbab89e32af7abe1e
|
/deep4rec/datasets/census.py
|
4d58743074fe1b8d1eb3004a4a18b2d8469a1f62
|
[
"Apache-2.0"
] |
permissive
|
Luiz-FS/Deep4Rec
|
b021fbc36e377e1055e46e9a52f68c32018894f8
|
78c5ca74f0e0d06a9f4bb2f267817b69abd40d1d
|
refs/heads/master
| 2020-09-03T14:27:45.828007
| 2019-11-18T01:10:30
| 2019-11-18T01:10:30
| 219,484,411
| 0
| 0
|
Apache-2.0
| 2019-11-18T01:10:31
| 2019-11-04T11:22:00
|
Python
|
UTF-8
|
Python
| false
| false
| 6,110
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset interface for Census dataset.
Census dataset: https://archive.ics.uci.edu/ml/machine-learning-databases/adult
"""
import os
import urllib.request
import numpy as np
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
from deep4rec.datasets.dataset import Dataset
import deep4rec.utils as utils
_CSV_COLUMNS = [
"age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"gender",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"income_bracket",
]
_CSV_COLUMN_DEFAULTS = [
[0],
[""],
[0],
[""],
[0],
[""],
[""],
[""],
[""],
[""],
[0],
[0],
[0],
[""],
[""],
]
class CensusDataset(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult"
    def __init__(self, dataset_name, output_dir, *args, **kwargs):
        """Set up remote/local file locations and the (unfitted) encoders.

        Args:
            dataset_name: Name used for the preprocessed artifact path.
            output_dir: Directory that receives the raw and preprocessed files.
        """
        super().__init__(dataset_name, output_dir, *args, **kwargs)
        self.train_filename = "adult.data"
        self.test_filename = "adult.test"
        # Remote locations of the raw UCI census files.
        self.train_url = os.path.join(self.url, self.train_filename)
        self.test_url = os.path.join(self.url, self.test_filename)
        # Local destinations for the downloaded copies.
        self.train_path = os.path.join(self.output_dir, self.train_filename)
        self.test_path = os.path.join(self.output_dir, self.test_filename)
        self.preprocessed_path = os.path.join(self.output_dir, self.dataset_name)
        # Encoders are fitted on the training split and reused on test data
        # (see _preprocess's train_data flag).
        self._ord_encoder = OrdinalEncoder()
        self._occupation_ord_encoder = OrdinalEncoder()
        self._one_hot_encoder = OneHotEncoder(sparse=False)
def _download_and_clean_file(self, url, filename):
"""Downloads data from url, and makes changes to match the CSV format."""
temp_file, _ = urllib.request.urlretrieve(url)
with tf.gfile.Open(temp_file, "r") as temp_eval_file:
with tf.gfile.Open(filename, "w") as eval_file:
for line in temp_eval_file:
line = line.strip()
line = line.replace(", ", ",")
if not line or "," not in line:
continue
if line[-1] == ".":
line = line[:-1]
line += "\n"
eval_file.write(line)
tf.gfile.Remove(temp_file)
def download(self):
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
self._download_and_clean_file(self.train_url, self.train_path)
self._download_and_clean_file(self.test_url, self.test_path)
def check_downloaded(self):
return os.path.exists(self.train_path) and os.path.exists(self.test_path)
def check_preprocessed(self):
return False
def _preprocess(self, filename, train_data=False):
df = pd.read_csv(filename, names=_CSV_COLUMNS)
# Categorical columns
df_base_columns = df[
["education", "marital_status", "relationship", "workclass"]
]
if train_data:
base_columns = self._ord_encoder.fit_transform(df_base_columns.values)
occupation_column = self._occupation_ord_encoder.fit_transform(
df["occupation"].values.reshape(-1, 1)
)
one_hot_base_columns = self._one_hot_encoder.fit_transform(
df_base_columns.values
)
else:
base_columns = self._ord_encoder.transform(df_base_columns.values)
occupation_column = self._occupation_ord_encoder.transform(
df["occupation"].values.reshape(-1, 1)
)
one_hot_base_columns = self._one_hot_encoder.transform(
df_base_columns.values
)
# Age buckets
buckets = [0, 18, 25, 30, 35, 40, 45, 50, 55, 60, 65, 200]
age_buckets = np.array(
pd.cut(df["age"], buckets, labels=range(len(buckets) - 1)).values
)
wide_columns = np.concatenate(
(base_columns, age_buckets.reshape(-1, 1)), axis=1
)
numerical_columns = df[
["age", "education_num", "capital_gain", "capital_loss", "hours_per_week"]
].values
deep_columns = np.concatenate((one_hot_base_columns, numerical_columns), axis=1)
labels = np.where(df["income_bracket"].values == ">50K", 1, 0)
return wide_columns, deep_columns, occupation_column, labels
def preprocess(self):
self.train_wide_data, self.train_deep_data, self.train_embedding_data, self.train_y = self._preprocess(
self.train_path, train_data=True
)
self.test_wide_data, self.test_deep_data, self.test_embedding_data, self.test_y = self._preprocess(
self.test_path, train_data=False
)
@property
def train_size(self):
return len(self.train_wide_data)
@property
def train_features(self):
return [self.train_embedding_data, self.train_wide_data, self.train_deep_data]
@property
def test_features(self):
return [self.test_embedding_data, self.test_wide_data, self.test_deep_data]
@property
def num_features_one_hot(self):
return len(np.unique(self.train_embedding_data))
@property
def num_features(self):
return 1
|
[
"mariannelinharesm@gmail.com"
] |
mariannelinharesm@gmail.com
|
e469b1c0df202afbdf63411ba8abdbd4527e1190
|
493f99b210303d019f62195ae8dde9d02ee1b81f
|
/indy_node/test/api/test_rich_schema_objects_reply.py
|
56dbda7791bdd23e1fda9ccddcde4b9f7330077d
|
[
"Apache-2.0"
] |
permissive
|
darklordz-217/indy-node
|
745baa357fe739bac20433cb2daa0f7c5a2f2caf
|
4d2f6a9dc0ff136117f8766a4f2cf70b239404e0
|
refs/heads/master
| 2022-10-06T17:01:12.414734
| 2020-06-11T08:49:17
| 2020-06-11T08:49:17
| 271,472,931
| 2
| 0
|
Apache-2.0
| 2020-06-11T09:04:23
| 2020-06-11T06:48:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,499
|
py
|
import json
import pytest
from indy_common.constants import JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, RICH_SCHEMA, RICH_SCHEMA_ENCODING, \
RICH_SCHEMA_MAPPING, RICH_SCHEMA_CRED_DEF, RS_CRED_DEF_TYPE_VALUE, RS_MAPPING_TYPE_VALUE, \
RS_ENCODING_TYPE_VALUE, RS_SCHEMA_TYPE_VALUE, RICH_SCHEMA_PRES_DEF, RS_PRES_DEF_TYPE_VALUE
from indy_node.test.api.helper import validate_write_reply, validate_rich_schema_txn, sdk_build_rich_schema_request
from indy_node.test.rich_schema.templates import RICH_SCHEMA_EX1, W3C_BASE_CONTEXT, RICH_SCHEMA_ENCODING_EX1, \
RICH_SCHEMA_MAPPING_EX1, RICH_SCHEMA_CRED_DEF_EX1, RICH_SCHEMA_PRES_DEF_EX1
from plenum.common.util import randomString
from plenum.test.helper import sdk_get_reply, sdk_sign_and_submit_req
# The order of creation is essential as some rich schema object reference others by ID
# Encoding's id must be equal to the one used in RICH_SCHEMA_MAPPING_EX1
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id',
                         [(JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, W3C_BASE_CONTEXT, randomString()),
                          (RICH_SCHEMA, RS_SCHEMA_TYPE_VALUE, RICH_SCHEMA_EX1, RICH_SCHEMA_EX1['@id']),
                          (RICH_SCHEMA_ENCODING, RS_ENCODING_TYPE_VALUE, RICH_SCHEMA_ENCODING_EX1,
                           "did:sov:1x9F8ZmxuvDqRiqqY29x6dx9oU4qwFTkPbDpWtwGbdUsrCD"),
                          (RICH_SCHEMA_MAPPING, RS_MAPPING_TYPE_VALUE, RICH_SCHEMA_MAPPING_EX1,
                           RICH_SCHEMA_MAPPING_EX1['@id']),
                          (RICH_SCHEMA_CRED_DEF, RS_CRED_DEF_TYPE_VALUE, RICH_SCHEMA_CRED_DEF_EX1, randomString()),
                          (RICH_SCHEMA_PRES_DEF, RS_PRES_DEF_TYPE_VALUE, RICH_SCHEMA_PRES_DEF_EX1,
                           RICH_SCHEMA_PRES_DEF_EX1['@id'])])
def test_rich_schema_object_reply_is_valid(looper, sdk_pool_handle, sdk_wallet_steward,
                                           txn_type, rs_type, content, rs_id):
    """Submit one rich-schema object of each type and validate the reply."""
    # Build and sign a write request for this rich-schema object.
    request = sdk_build_rich_schema_request(looper, sdk_wallet_steward,
                                            txn_type=txn_type, rs_id=rs_id, rs_name=randomString(),
                                            rs_version='1.0', rs_type=rs_type,
                                            rs_content=json.dumps(content))
    reply = sdk_get_reply(looper, sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_steward, request))[1]
    # The reply must be a well-formed write reply whose txn matches txn_type.
    validate_write_reply(reply)
    validate_rich_schema_txn(reply['result']['txn'], txn_type)
|
[
"alexander.sherbakov@dsr-corporation.com"
] |
alexander.sherbakov@dsr-corporation.com
|
bcfcfd42d82934ef66bd39ecc5139583c6a927df
|
f62ff90d7850af458d8f12386fc9ee9134dbe7c1
|
/Plots/Showplots/Model_3/Current_Voltage_Curves.py
|
2d9023dab4df536df56c4202551adad30523eb73
|
[] |
no_license
|
AlexSchmid22191/EIS_R_Sim
|
51b431f078cb455fc38637c192436c0523449565
|
851b061e60811e1e58a5b2fd4e393e529c3f86ac
|
refs/heads/master
| 2023-06-27T17:40:59.177270
| 2021-07-22T11:50:27
| 2021-07-22T11:50:27
| 380,768,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,606
|
py
|
from matplotlib.pyplot import subplots, show
from matplotlib.style import use
from numpy import load, log10
use('../Show.mplstyle')
# Structured array with 'overpotential', 'current' and 'pressure' fields,
# precomputed for model 3.
data = load('../../../Currents_Resistances_Model_3/Current_Data_Model_3.npy')
# One figure with two stacked axes per oxygen partial-pressure regime.
fig_hi, ax_hi = subplots(nrows=2, figsize=(6, 8))
fig_me, ax_me = subplots(nrows=2, figsize=(6, 8))
fig_lo, ax_lo = subplots(nrows=2, figsize=(6, 8))
# High oxygen partial pressures
# NOTE(review): rows are sampled [1::25] on the top axis and [0::25] on the
# bottom — presumably the two current branches; confirm against data layout.
for i in (1400, 1500, 1600, 1700, 1800):
    ax_hi[0].plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
    ax_hi[1].plot(data['overpotential'][0::25, i], data['current'][0::25, i], linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
# Medium oxygen partial pressures
for i in (1000, 1100, 1200, 1300):
    ax_me[0].plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
    ax_me[1].plot(data['overpotential'][0::25, i], data['current'][0::25, i], linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
# Low oxygen partial pressures
for i in (500, 600, 700, 800, 900):
    ax_lo[0].plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
    ax_lo[1].plot(data['overpotential'][0::25, i], data['current'][0::25, i], linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
# Top axes use a log scale on the absolute current.
ax_hi[0].set_yscale('log')
ax_me[0].set_yscale('log')
ax_lo[0].set_yscale('log')
# Bottom axes use symlog so both current signs are visible.
# NOTE(review): `linthreshy` was renamed `linthresh` in Matplotlib 3.3 —
# confirm the pinned Matplotlib version.
ax_hi[1].set_yscale('symlog', linthreshy=1e-1)
ax_me[1].set_yscale('symlog', linthreshy=1e-4)
ax_lo[1].set_yscale('symlog', linthreshy=1e-9)
# ax_hi[0].set_ylim(1e-3, 1e5)
# ax_hi[1].set_ylim(-1e5, 1e0)
# ax_me[0].set_ylim(1e-6, 1e0)
# ax_me[1].set_ylim(-1e0, 1e0)
# ax_lo[0].set_ylim(1e-10, 1e0)
# ax_lo[1].set_ylim(-1e-4, 1e1)
# Common labels/legend for every axis.
for ax in (ax_hi[0], ax_hi[1], ax_me[0], ax_me[1], ax_lo[0], ax_lo[1]):
    ax.set_ylabel('Absolute current density (A/m²)')
    ax.set_xlabel('Overpotential (V)')
    ax.legend()
# fig_hi.tight_layout()
# fig_hi.savefig('Plots/Current_Voltage_Curves_Hi.pdf')
# fig_hi.savefig('Plots/Current_Voltage_Curves_Hi.png')
#
# fig_me.tight_layout()
# fig_me.savefig('Plots/Current_Voltage_Curves_Me.pdf')
# fig_me.savefig('Plots/Current_Voltage_Curves_Me.png')
#
# fig_lo.tight_layout()
# fig_lo.savefig('Plots/Current_Voltage_Curves_Lo.pdf')
# fig_lo.savefig('Plots/Current_Voltage_Curves_Lo.png')
show()
|
[
"Alex.Schmid91@gmail.com"
] |
Alex.Schmid91@gmail.com
|
690fe2ffb43edf1febae8410ba150129ce00cce0
|
3419067388879d8a6542df01cb0278ae90b021a2
|
/py100day/Day01-15/Day04/code/for2.py
|
22c96886304fa47ecee9b5c39f3f60d9a36a21f1
|
[] |
no_license
|
oweson/python-river-master
|
faa31c5248e297a92054cc302e213e2b37fb8bd5
|
cf9e99e611311b712465eb11dec4bb8f712929b2
|
refs/heads/master
| 2021-06-21T15:47:01.755957
| 2019-10-02T00:08:05
| 2019-10-02T00:08:05
| 205,607,518
| 0
| 0
| null | 2021-06-10T21:55:20
| 2019-08-31T23:39:55
|
Python
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
"""
用for循环实现1~100之间的偶数求和
Version: 0.1
Author: 骆昊
Date: 2018-03-01
"""
# 步长是2
sum = 0
for x in range(2, 101, 2):
sum += x
print(sum)
|
[
"570347720@qq.com"
] |
570347720@qq.com
|
1026e1d0f5add5bf40edc076405f2e409f26c5ce
|
2f2682f778512a75a1ff49d7e267c2f4d355c48e
|
/geoprocess/controllers.py
|
7be119b34c9b20b609770261e464a475b5996a9b
|
[] |
no_license
|
beatcovid/geoprocess
|
4a44f46b900c2e0ffed0dab18008e7884e759e3b
|
c2a7b1e4ede06583679db9dadebe2066b0274e54
|
refs/heads/master
| 2023-04-13T13:45:48.572825
| 2020-05-27T03:08:14
| 2020-05-27T03:08:14
| 260,215,049
| 0
| 1
| null | 2023-03-29T00:36:19
| 2020-04-30T13:11:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,538
|
py
|
import csv
import email.utils
import json
import logging
import os
import sys
from datetime import datetime
from pprint import pprint
from dotenv import load_dotenv
from pymongo import MongoClient
from geoprocess.find_psma import find_lga, find_sa3
from geoprocess.google_geo import google_geocode, lookup_placeid, place_autocomplete
from geoprocess.settings import MONGO_CONNECT_URL
load_dotenv()
logger = logging.getLogger("geoprocess")
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.INFO)
mongo_connection = MongoClient(MONGO_CONNECT_URL)
def flatten_google_place(place, prefix):
    """Flatten a Google place result into a ``{prefix_type: short_name}`` dict.

    Every address component contributes one entry per type tag, except the
    generic "political" tag, which is skipped.
    """
    return {
        prefix + "_" + kind: component["short_name"]
        for component in place["address_components"]
        for kind in component["types"]
        if kind != "political"
    }
def get_granuality(flat_geo, prefix):
    """Return the finest geographic granularity present in a flattened place.

    Checks prefixed keys from finest (postal code) to coarsest (country)
    and returns the first suffix found; defaults to "country".
    """
    suffixes = (
        "postal_code",
        "locality",
        "administrative_area_level_2",
        "administrative_area_level_1",
        "country",
    )
    for suffix in suffixes:
        if prefix + "_" + suffix in flat_geo:
            return suffix
    return "country"
def update_geoplots():
    """Geocode unprocessed submissions and write results back to MongoDB.

    For every instance document not yet flagged ``_geo_processed``: look up
    each place-id field via the Google Places API, flatten the result, attach
    LGA/SA3 ids and a granularity label for Australian places, and update the
    document in place.
    """
    db = mongo_connection.prod_covid19_api_docdb.instances
    # Only documents that have not been geo-processed yet.
    query = {"_geo_processed": {"$ne": True}}
    processed = 0
    updated = 0
    # Document fields expected to hold Google place ids.
    place_fields = ["userdetail_city", "travel_country"]
    for a in db.find(query).sort("_submission_time", -1):
        for place_field in place_fields:
            if place_field in a:
                # Skip non-strings and free text (place ids contain no spaces).
                if not type(a[place_field]) is str:
                    continue
                if " " in a[place_field]:
                    continue
                try:
                    p = lookup_placeid(a[place_field])
                except Exception as e:
                    logger.error("Could not find place id for: {}".format(a[place_field]))
                    logger.error(e)
                    continue
                p_flat = flatten_google_place(p, place_field)
                # Australian places with a locality/postcode get LGA & SA3 ids.
                if (
                    place_field + "_country" in p_flat
                    and p_flat[place_field + "_country"] == "AU"
                    and (
                        place_field + "_locality" in p_flat
                        or place_field + "_postal_code" in p_flat
                    )
                ):
                    if not place_field + "_lga_id" in a:
                        lgs = find_lga(
                            p["geometry"]["location"]["lat"],
                            p["geometry"]["location"]["lng"],
                        )
                        if lgs:
                            p_flat[place_field + "_lga_id"] = lgs
                    if not place_field + "_sa3_id" in a:
                        sa3 = find_sa3(
                            p["geometry"]["location"]["lat"],
                            p["geometry"]["location"]["lng"],
                        )
                        if sa3:
                            p_flat[place_field + "_sa3_id"] = sa3
                p_flat[place_field + "_granuality"] = get_granuality(p_flat, place_field)
                # Australian places also get a _state shorthand.
                # NOTE(review): if only the postal_code branch of the condition
                # matched, _administrative_area_level_1 may be absent and the
                # assignment below raises KeyError — confirm intended.
                if (
                    place_field + "_country" in p_flat
                    and p_flat[place_field + "_country"] == "AU"
                    and (
                        place_field + "_administrative_area_level_1" in p_flat
                        or "userdetail_city_postal_code" in p_flat
                    )
                ):
                    p_flat[place_field + "_state"] = p_flat[
                        place_field + "_administrative_area_level_1"
                    ]
                p_flat["_geo_processed"] = True
                pprint(p_flat)
                try:
                    db.update_one(
                        {"_id": a["_id"]}, {"$set": p_flat},
                    )
                except Exception as e:
                    logger.error(
                        "Db error on updating place_id: {} {}".format(
                            a["_id"], place_field
                        )
                    )
                    logger.error(e)
                    continue
                logger.info(
                    "Updated {} {} -> {}".format(place_field, a["_id"], a[place_field])
                )
                updated += 1
        processed += 1
    print("Processed {} and updated {}".format(processed, updated))
|
[
"nc9@protonmail.com"
] |
nc9@protonmail.com
|
c0bccab0f33fe2f6323731cddd1742ba4d45275c
|
aa410a95773aeea73e75f0e701db5cdc0eda890b
|
/weapons.py
|
cf6e4eb05ba6ad8a453e07637018051ed6eac5f8
|
[] |
no_license
|
predominant/zombsole
|
ccc00893b7739c5341c43fc28375415fa628b885
|
a04ff40a144cb1f63d8aa29ccf0b06ecccc2bc7f
|
refs/heads/master
| 2021-01-21T19:29:05.322551
| 2014-03-26T05:38:15
| 2014-03-26T05:38:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
# coding: utf-8
from core import Weapon
def _new_weapon_class(name, max_range, damage_range):
    """Build and return a Weapon subclass preconfigured with the given stats."""
    class NewWeapon(Weapon):
        def __init__(self):
            Weapon.__init__(self, name, max_range, damage_range)
    # Give the generated class a meaningful name for repr/debugging.
    NewWeapon.__name__ = name
    return NewWeapon
# Concrete weapon classes: name, max range, (min damage, max damage).
ZombieClaws = _new_weapon_class('ZombieClaws', 1.5, (5, 10))
Knife = _new_weapon_class('Knife', 1.5, (5, 10))
Axe = _new_weapon_class('Axe', 1.5, (75, 100))
Gun = _new_weapon_class('Gun', 6, (10, 50))
Rifle = _new_weapon_class('Rifle', 10, (25, 75))
Shotgun = _new_weapon_class('Shotgun', 3, (75, 100))
|
[
"fisadev@gmail.com"
] |
fisadev@gmail.com
|
2eac0fe3402f79f389178ebe792a10a16f7c1a4a
|
039f2c747a9524daa1e45501ada5fb19bd5dd28f
|
/AGC001/AGC001c.py
|
6f3fb7892212fb5a2683a833717ea55a344d0dfd
|
[
"Unlicense"
] |
permissive
|
yuto-moriizumi/AtCoder
|
86dbb4f98fea627c68b5391bf0cc25bcce556b88
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
refs/heads/master
| 2023-03-25T08:10:31.738457
| 2021-03-23T08:48:01
| 2021-03-23T08:48:01
| 242,283,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
# AGC001c
def main():
    """Solution scaffold for AtCoder AGC001 C (solution logic not written yet)."""
    import sys
    # Fast line reader and a generous recursion limit, per AtCoder custom.
    read = sys.stdin.readline
    sys.setrecursionlimit(10 ** 6)
    # map(int, read().split())
if __name__ == '__main__':
    main()
|
[
"kurvan1112@gmail.com"
] |
kurvan1112@gmail.com
|
eaeef1d5a47d3ff5621d988c694458cf63dc39a6
|
ceab178d446c4ab55951c3d65d99815e9fdee43a
|
/archive/coding_practice/python/ticks_plot.py
|
83e7d35370f009514aa95366b78a92f4f61f0afa
|
[] |
no_license
|
DeneBowdalo/AtmosChem_Tools
|
01ecedb0df5c5d6e01966a0c3d8055826f5ac447
|
220c2f697a4f4c1e5443c336ede923b2004fe9f5
|
refs/heads/master
| 2021-01-10T18:05:30.800218
| 2017-02-06T16:08:14
| 2017-02-06T16:08:14
| 43,529,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
import matplotlib.pyplot as plt
# Sample series plotted against its index positions.
x = [5,3,7,2,4,1,11,25,33]
plt.plot(x)
# Label the x axis positions with letters instead of indices.
plt.xticks(range(len(x)), ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']);
# Odd-valued y ticks from 1 to 35.
plt.yticks(range(1,36,2));
plt.show()
|
[
"db876@earth0.york.ac.uk"
] |
db876@earth0.york.ac.uk
|
def39a55d547e1131e0f8dcf639f5da81e09bb90
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/cGaTqHsPfR5H6YBuj_0.py
|
c3936bfae1158025ccd064458e0c9c17ee2d0b5e
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
"""
Given a list of ingredients `i` and a flavour `f` as input, create a function
that returns the list, but with the elements `bread` around the selected
ingredient.
### Examples
make_sandwich(["tuna", "ham", "tomato"], "ham") ➞ ["tuna", "bread", "ham", "bread", "tomato"]
make_sandwich(["cheese", "lettuce"], "cheese") ➞ ["bread", "cheese", "bread", "lettuce"]
make_sandwich(["ham", "ham"], "ham") ➞ ["bread", "ham", "bread", "bread", "ham", "bread"]
### Notes
* You will always get valid inputs.
* Make two separate sandwiches if two of the same elements are next to each other (see example #3).
"""
def make_sandwich(ingredients, flavour):
sandwich = []
for i in ingredients:
sandwich += ['bread', i, 'bread'] if i == flavour else [i]
return sandwich
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
b01ea9b981eaf809aed4db02cdf99add3ef4992e
|
a4753147801dbabfec45f6f9f47572cda77efb81
|
/debugging-constructs/ibmfl/util/data_handlers/mnist_pytorch_data_handler.py
|
29cc18afb938e575e71025d9007fd67f722221b9
|
[
"MIT"
] |
permissive
|
SEED-VT/FedDebug
|
e1ec1f798dab603bd208b286c4c094614bb8c71d
|
64ffa2ee2e906b1bd6b3dd6aabcf6fc3de862608
|
refs/heads/main
| 2023-05-23T09:40:51.881998
| 2023-02-13T21:52:25
| 2023-02-13T21:52:25
| 584,879,212
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,460
|
py
|
"""
Licensed Materials - Property of IBM
Restricted Materials of IBM
20221069
© Copyright IBM Corp. 2022 All Rights Reserved.
"""
import logging
import numpy as np
from ibmfl.data.data_handler import DataHandler
from ibmfl.util.datasets import load_mnist
logger = logging.getLogger(__name__)
class MnistPytorchDataHandler(DataHandler):
    """Data handler that loads and shapes the MNIST dataset for PyTorch use."""
    def __init__(self, data_config=None):
        """Load MNIST (optionally from a local .npz archive) and preprocess it.

        :param data_config: Optional dict; key 'npz_file' points to a local
            .npz archive containing x_train/y_train/x_test/y_test arrays.
        """
        super().__init__()
        self.file_name = None
        if data_config is not None:
            if 'npz_file' in data_config:
                self.file_name = data_config['npz_file']
        # load the datasets
        (self.x_train, self.y_train), (self.x_test, self.y_test) = self.load_dataset()
        # pre-process the datasets
        self.preprocess()
    def get_data(self):
        """
        Gets pre-process mnist training and testing data.
        :return: training data
        :rtype: `tuple`
        """
        return (self.x_train, self.y_train), (self.x_test, self.y_test)
    def load_dataset(self, nb_points=500):
        """
        Loads the training and testing datasets from a given local path. \
        If no local path is provided, it will download the original MNIST \
        dataset online, and reduce the dataset size to contain \
        500 data points per training and testing dataset.
        Because this method
        is for testing it takes as input the number of datapoints, nb_points,
        to be included in the training and testing set.
        :param nb_points: Number of data points to be included in each set if
        no local dataset is provided.
        :type nb_points: `int`
        :return: training and testing datasets
        :rtype: `tuple`
        """
        if self.file_name is None:
            (x_train, y_train), (x_test, y_test) = load_mnist()
            # Truncate each split to nb_points examples for fast testing.
            x_train = x_train[:nb_points]
            y_train = y_train[:nb_points]
            x_test = x_test[:nb_points]
            y_test = y_test[:nb_points]
        else:
            try:
                logger.info('Loaded training data from ' + str(self.file_name))
                data_train = np.load(self.file_name)
                x_train = data_train['x_train']
                y_train = data_train['y_train']
                x_test = data_train['x_test']
                y_test = data_train['y_test']
            except Exception:
                raise IOError('Unable to load training data from path '
                              'provided in config file: ' +
                              self.file_name)
        return (x_train, y_train), (x_test, y_test)
    def preprocess(self):
        """
        Preprocesses the training and testing dataset, \
        e.g., reshape the images according to self.channels_first; \
        convert the labels to binary class matrices.
        :return: None
        """
        img_rows, img_cols = 28, 28
        # Reshape images to a channels-first (N, 1, 28, 28) float32 layout.
        self.x_train = self.x_train.astype('float32').reshape(self.x_train.shape[0], 1, img_rows, img_cols)
        self.x_test = self.x_test.astype('float32').reshape(self.x_test.shape[0], 1,img_rows, img_cols)
        # print(self.x_train.shape[0], 'train samples')
        # print(self.x_test.shape[0], 'test samples')
        # Labels are cast to int64 (class indices, not one-hot).
        self.y_train = self.y_train.astype('int64')
        self.y_test = self.y_test.astype('int64')
        # print('y_train shape:', self.y_train.shape)
        # print(self.y_train.shape[0], 'train samples')
        # print(self.y_test.shape[0], 'test samples')
|
[
"waris@vt.edu"
] |
waris@vt.edu
|
7eced97eac47dfd2ce21cee31fe289634f7a5bf7
|
eac6dc8eb8e5f088500f425a7323cd35a4f99bd6
|
/src/courses/migrations/0012_course_active.py
|
af89db3155df4d47be9b84b4c843f0b847c617a6
|
[] |
no_license
|
aminhp93/django_serverup_2
|
a14195af756799795282028ba611dbccc3848870
|
aef31722e882367c731e9e48fc8af8740befc112
|
refs/heads/master
| 2020-05-27T01:54:15.268661
| 2017-02-25T21:58:36
| 2017-02-25T21:58:36
| 82,514,017
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-19 18:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a boolean ``active`` flag (default True)
    to the ``course`` model."""
    dependencies = [
        ('courses', '0011_auto_20170219_1749'),
    ]
    operations = [
        migrations.AddField(
            model_name='course',
            name='active',
            field=models.BooleanField(default=True),
        ),
    ]
|
[
"minhpn.org.ec@gmail.com"
] |
minhpn.org.ec@gmail.com
|
ba63f7efdf10aab9c7481c9a2bee33143ac12df2
|
2037235643046608bf883f11c1bc448e2df8a4a3
|
/HuaYing/practice/test14.py
|
a18f331036c28c57f36f4079f83d4f9d3c4a6650
|
[] |
no_license
|
Hardworking-tester/HuaYingAutoTest
|
7e46dfb0729961cee0da06762fc0be11724ad80b
|
c1f0cf7aa4433f482bbae88d1a5637b9859359ca
|
refs/heads/master
| 2021-01-10T18:38:37.788736
| 2015-09-05T10:37:10
| 2015-09-05T10:37:10
| 41,957,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,368
|
py
|
#encoding:utf-8
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
# Launch Firefox and open the site under test.
br=webdriver.Firefox()
# br.maximize_window()
br.get("http://www.xebest.com:8000")
# The fifth "nav-arrow" element is the site-navigation entry being tested.
elements=br.find_elements_by_class_name("nav-arrow")
element1=elements[4]
if element1.is_displayed():
    print ("网站导航链接已定位到")
else:
    print ("网站导航元素未找到,请更换定位方式后重新定位")
# Alternative locators kept for reference:
# if br.find_element_by_xpath("//*[@id='topnav']/ul/li[5]/div[2]/ul[2]/li[2]/a").is_displayed():
# if br.find_element_by_css_selector("div#topnav>ul:first>li:nth(4)>div:nth(1)>ul:nth(1)>li(1)>a").is_displayed():
# if br.find_element_by_css_selector("li#all_menu>ul:nth(0)>li:nth(0)>a>span").is_displayed():
# if br.find_element_by_link_text(u"易支付").is_displayed():
#     print ("易支付元素已找到")
# else:
#     print("易支付元素未找到,请更换定位方式后重新定位")
# epay=br.find_element_by_css_selector("div#topnav>ul>li:nth(4)>div:nht(1)>ul:nth(1)>li(1)>a")
# epay=br.find_element_by_xpath("//*[@id='topnav']/ul/li[5]/div[2]/ul[2]/li[2]/a")
# epay=br.find_element_by_xpath("//*[@id='topnav']/ul/li[5]/div[2]/ul[2]/li[2]/a")
# Locate the "易支付" (e-pay) link by its visible text.
epay=br.find_element_by_link_text(u"易支付")
# Hover-then-click the navigation entry, then the e-pay link.
ActionChains(br).move_to_element(element1).click(element1).perform()
ActionChains(br).move_to_element(epay).click(epay).perform()
|
[
"373391120@qq.com"
] |
373391120@qq.com
|
636022ef17714db27f131c08daa673606f4185d8
|
511b7b19ec49be34bec240ee7c7cf4178cd36ca3
|
/gasolinestation/migrations/0013_auto_20200304_0909.py
|
fb6c52a8156995aa62443e5a937be261f2953067
|
[] |
no_license
|
francisguchie/360POS
|
58de516fe52e83d6b99bd195d22c8aa902daee18
|
68f9e20ac263c75ec0c9b0fe75d7f648b8744ea8
|
refs/heads/master
| 2023-02-08T16:38:42.667538
| 2020-03-12T16:05:00
| 2020-03-12T16:05:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# Generated by Django 3.0.3 on 2020-03-04 09:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add ``dispensed_liter`` to TransactionSales
    and relax ``price`` to a nullable decimal."""
    dependencies = [
        ('gasolinestation', '0012_transactionsales'),
    ]
    operations = [
        migrations.AddField(
            model_name='transactionsales',
            name='dispensed_liter',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='transactionsales',
            name='price',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
    ]
|
[
"monde.lacanlalay@gmail.com"
] |
monde.lacanlalay@gmail.com
|
86089daeedc71651ae0564812bf24553d130050a
|
f399fbac7e35dcc2c2f2ad4d3202b0839d9b7d48
|
/user/send_mail.py
|
0cb781b2301d5d6442e6f1cfdfd49aada05a621f
|
[] |
no_license
|
AktanKasymaliev/django-toilets-service
|
480f56b652a88e1422290de8906f0bb6d5693cff
|
225d71b164c36bab5fded86390b17ce265694a17
|
refs/heads/main
| 2023-07-14T12:46:12.399114
| 2021-08-23T17:14:04
| 2021-08-23T17:14:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import send_mail
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from decouple import config
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.utils.encoding import force_bytes, force_text
from .token import account_activation_token
def send_confirmation_email(request, user):
    """Send an HTML account-activation email to ``user.email``.

    Renders ``account/email.html`` with the current site domain, a
    base64-encoded user primary key and an activation token, then sends it
    via :class:`EmailMultiAlternatives`. Send failures are silenced
    (``fail_silently=True``).

    Args:
        request: Incoming HttpRequest, used to resolve the site domain.
        user: User instance being activated; must expose ``pk`` and ``email``.
    """
    context = {
        "small_text_detail": "Thank you for "
                             "creating an account. "
                             "Please verify your email "
                             "address to set up your account.",
        "email": user.email,
        "domain": get_current_site(request).domain,
        "uid": urlsafe_base64_encode(force_bytes(user.pk)),
        "token": account_activation_token.make_token(user),
    }
    # Fixed typo in the user-facing subject (was "Active your account").
    mail_subject = 'Activate your account'
    message = render_to_string('account/email.html', context)
    email = EmailMultiAlternatives(
        mail_subject,
        message,
        from_email=config('EMAIL_HOST_USER'),
        to=[user.email],
    )
    # The rendered template is HTML, so mark the body accordingly.
    email.content_subtype = 'html'
    email.send(fail_silently=True)
    print("ВСЕ ПРОШЛО УСПЕШНО EMAIL SENT")
|
[
"aktan.kasymaliev@icloud.com"
] |
aktan.kasymaliev@icloud.com
|
2ed87c256e5bf9f70115c96c9aec2798f8b5a5af
|
14913a0fb7e1d17318a55a12f5a181dddad3c328
|
/63.snake.py
|
990234c17a8d9d056195b13ae470723aa887b84e
|
[] |
no_license
|
Jesuisjavert/Algorithm
|
6571836ec23ac3036565738c2bee94f416595f22
|
730549d19e66e20b3474a235a600958a8e036a0e
|
refs/heads/master
| 2023-02-16T06:34:50.984529
| 2020-09-25T09:40:30
| 2020-09-25T09:40:30
| 330,849,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
import sys
# Redirect stdin so input() reads from the bundled test file.
sys.stdin = open('input.txt','r')
# T: number of test cases.
T = int(input())
for testcase in range(1,T+1):
    # Read a 4-row integer grid for this test case and echo it.
    arr = [list(map(int, input().split())) for _ in range(4)]
    print(arr)
|
[
"jesuisjavert@gmail.com"
] |
jesuisjavert@gmail.com
|
beed14a3c1aff89d035020396a37556f4cf88ed1
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/wicd/rev519-537/right-branch-537/wicd/backends/be-wireless/threadedwirelessinterface.py
|
ab1a5d1e45f9fa860b190118e1d14d918ce5832a
|
[] |
no_license
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,698
|
py
|
from misc import WicdError
from baseinterface import needsidle
from encryptablewirelessinterface import EncryptableWirelessInterface
from asyncrunner import AsyncManager, AsyncError
class ThreadedWirelessInterface(EncryptableWirelessInterface):
    """Wireless interface whose scan/connect operations run asynchronously.

    (Python 2 code: note the print statements.)
    """
    def __init__(self, interface_name):
        EncryptableWirelessInterface.__init__(self, interface_name)
        # Runs long-blocking operations off the calling thread.
        self.__async_manager = AsyncManager()
    def scan(self, finished_callback):
        ''' Performs a scan. Scanning is done asynchronously. '''
        def _do_scan(abort_if_needed, self):
            # Delegate the blocking scan to the parent implementation.
            return EncryptableWirelessInterface._do_scan(self)
        def finish_up(result):
            # NOTE(review): debug print left in; store results then notify.
            print 'scan finished', result
            self.networks = result
            finished_callback()
        self.__async_manager.run(_do_scan, finish_up, self)
    def connect(self, finished_callback):
        ''' Attempts to connect. Connecting is done asynchronously.'''
        def _do_connect(abort_if_needed, interface, network):
            # NOTE(review): placeholder body — loops forever until the
            # async manager aborts it; real connect logic not implemented.
            print 'connecting...'
            print interface
            print network
            import time
            while True:
                time.sleep(10)
                print 'in connecting thread...'
                abort_if_needed()
            print 'done connecting'
        def finish_up(result):
            finished_callback()
        # Named 'connect' so cancel_connection_attempt() can stop it.
        self.__async_manager.run(_do_connect, finish_up, self,
                                 self.current_network,
                                 name='connect')
    def cancel_connection_attempt(self):
        ''' Cancel the current attempt to connect to the network. '''
        self.__async_manager.stop('connect')
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
be7975289ea7397570ae5a442d590aae139acd82
|
214dde26c268d1d0b7991318c5e2d43aa27af89b
|
/backlooking/order_analysis.py
|
c7b7acc13a43f9796ee1e1050048258fb6cc19ad
|
[] |
no_license
|
hellobiek/smart_deal_tool
|
f1846903ac402257bbe92bd23f9552970937d50e
|
ba8aad0a37843362f5833526921c6f700fb881f1
|
refs/heads/master
| 2022-09-04T04:41:34.598164
| 2022-08-04T22:04:09
| 2022-08-04T22:04:09
| 88,258,362
| 36
| 14
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,559
|
py
|
#-*- coding: utf-8 -*-
import os
import sys
from os.path import abspath, dirname
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import datetime
import const as ct
import pandas as pd
from futu import TrdEnv
from datetime import datetime
from base.cdate import get_dates_array
from tools.markdown_table import MarkdownTable
from tools.markdown_writer import MarkdownWriter
from algotrade.broker.futu.fututrader import FutuTrader
pd.options.mode.chained_assignment = None
def get_total_profit(orders):
    """Return realized profit: total sell proceeds minus total buy cost.

    *orders* is a DataFrame with columns ``trd_side`` ('BUY'/'SELL'),
    ``dealt_qty`` and ``dealt_avg_price``.
    """
    # Per-order traded value (quantity times average fill price).
    value = orders['dealt_qty'] * orders['dealt_avg_price']
    sell_total = value[orders.trd_side == 'SELL'].sum()
    buy_total = value[orders.trd_side == 'BUY'].sum()
    return sell_total - buy_total
def generate(orders, date_arrary, dirname, start, end):
    """Write a Markdown trading-review report for *orders* between start and end.

    Produces ``form_<start>_to_<end>_tading_review.md`` in *dirname* with a
    total-profit summary, one per-day order table, and empty review sections.
    """
    # NOTE(review): "tading" looks like a typo for "trading", but the name is
    # part of the on-disk contract — renaming would break existing links.
    filename = 'form_%s_to_%s_tading_review.md' % (start, end)
    os.makedirs(dirname, exist_ok = True)
    fullfilepath = os.path.join(dirname, filename)
    # Keep only the columns shown in the report tables.
    orders = orders[['code', 'trd_side', 'dealt_qty', 'dealt_avg_price', 'create_time', 'updated_time']]
    total_profit = get_total_profit(orders)
    md = MarkdownWriter()
    md.addTitle("%s_%s_交割单" % (start, end), passwd = '909897')
    md.addHeader("交割单分析", 1)
    md.addHeader("总收益分析", 2)
    t_index = MarkdownTable(headers = ["总收益"])
    t_index.addRow(["%s" % total_profit])
    md.addTable(t_index)
    md.addHeader("交割单复盘", 2)
    for cdate in date_arrary:
        md.addHeader("%s_交割单" % cdate, 3)
        # Orders created on this calendar day.
        order_info = orders.loc[orders['create_time'].str.startswith(cdate)]
        # Keep only the HH:MM:SS part of the creation timestamp.
        order_info.at[:, 'create_time'] = order_info.loc[:, 'create_time'].str.split().str[1].str[0:8]
        order_info = order_info.reset_index(drop = True)
        t_index = MarkdownTable(headers = ["名称", "方向", "数量", "价格", "创建时间", "完成时间", "对错", "分析"])
        for index in range(len(order_info)):
            data_list = order_info.loc[index].tolist()
            # The last two columns ("对错"/"分析") are left blank for manual review.
            content_list = [data_list[0], data_list[1], int(data_list[2]), round(data_list[3], 2), data_list[4], data_list[5].split(' ')[1].strip()[0:8], '', '']
            content_list = [str(i) for i in content_list]
            t_index.addRow(content_list)
        md.addTable(t_index)
    md.addHeader("本周总结", 2)
    md.addHeader("优点", 3)
    md.addHeader("缺点", 3)
    md.addHeader("心得", 3)
    with open(fullfilepath, "w+") as f:
        f.write(md.getStream())
def main():
    """Fetch FUTU history orders for a fixed date span and write the review."""
    #dirname = '/Volumes/data/quant/stock/data/docs/blog/hellobiek.github.io/source/_posts'
    dirname = '/Users/hellobiek/Documents/workspace/blog/blog/source/_posts'
    # Credential/key files used to unlock the FUTU trade context.
    unlock_path = "/Users/hellobiek/Documents/workspace/python/quant/smart_deal_tool/configure/follow_trend.json"
    key_path = "/Users/hellobiek/Documents/workspace/python/quant/smart_deal_tool/configure/key.pri"
    futuTrader = FutuTrader(host = ct.FUTU_HOST_LOCAL, port = ct.FUTU_PORT, trd_env = TrdEnv.REAL, market = ct.US_MARKET_SYMBOL, unlock_path = unlock_path, key_path = key_path)
    # Hard-coded reporting window.
    start = '2020-08-11'
    end = '2020-08-12'
    orders = futuTrader.get_history_orders(start = start, end = end)
    date_arrary = get_dates_array(start, end, dformat = "%Y-%m-%d", asending = True)
    generate(orders, date_arrary, dirname, start, end)
if __name__ == "__main__":
    main()
|
[
"hellobiek@gmail.com"
] |
hellobiek@gmail.com
|
ba7f120c0d5551658bacbd572127dbb325214ffa
|
11b420a9e6dbe371167227f41ef8e344e3382612
|
/ConvNets/Comparison_Plots/Pooled_Images/Pooled_Images.py
|
15a23b6ae92fc9bdfccb8654ccf3350027e0953e
|
[
"MIT"
] |
permissive
|
tarek-ullah/Active-Learning-Bayesian-Convolutional-Neural-Networks
|
7092386758b68dc922efaa2c2eba055930bf2896
|
f8b68038bd3b97c473e9c1de6b6cdee4538021f4
|
refs/heads/master
| 2021-01-13T06:57:19.343775
| 2016-11-02T12:22:16
| 2016-11-02T12:22:16
| 81,338,773
| 1
| 0
| null | 2017-02-08T14:34:15
| 2017-02-08T14:34:15
| null |
UTF-8
|
Python
| false
| false
| 3,650
|
py
|
from __future__ import print_function
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad, Adam
from keras.utils import np_utils, generic_utils
from six.moves import range
import numpy as np
import scipy as sp
from keras import backend as K
import random
import scipy.io
import matplotlib.pyplot as plt
from keras.regularizers import l2, activity_l2
from scipy.stats import mode
# input image dimensions
img_rows, img_cols = 28, 28

# the data, shuffled and split between train and test sets
(X_train_All, y_train_All), (X_test, y_test) = mnist.load_data()

# Reshape to channel-first (N, 1, 28, 28) layout.
X_train_All = X_train_All.reshape(X_train_All.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)

# Unlabelled pool used by the active-learning acquisition functions:
# training images 5000..14999.
X_Pool = X_train_All[5000:15000, :, :, :]
y_Pool = y_train_All[5000:15000]
Total_Pooled_Images = 400

# Root directory where the per-acquisition-function image folders live.
_RESULTS_DIR = '/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/RESULTS/Cluster_Experiment_Results/2nd/Pooled_Images/'


def _save_pool_images(index_file, label, out_dir):
    """Load pooled-image indices from *index_file* and save the
    corresponding X_Pool digits as JPEGs in *out_dir*.

    Replaces five copy-pasted load/print/save loops in the original script;
    output filenames and console messages are unchanged.
    """
    pool_indices = np.load(index_file)
    print('Pooling %s Images' % label)
    # saving pooled images
    for im in range(Total_Pooled_Images):
        # NOTE(review): the original indexed pool_indices[1 + im], skipping
        # element 0 and reading up to element Total_Pooled_Images --
        # possibly an off-by-one; preserved here, confirm intent.
        Image = X_Pool[pool_indices[1 + im], :, :, :]
        img = Image.reshape((28, 28))
        sp.misc.imsave(out_dir + 'Pooled' + '_Image_' + str(im) + '.jpg', img)


_save_pool_images('Bald_Pool.npy', 'Dropout Bald', _RESULTS_DIR + 'Bald_Pool_Images/')
_save_pool_images('Dropout_Max_Entropy_Pool.npy', 'Dropout Max Entropy', _RESULTS_DIR + 'Dropout_Max_Entropy_Images/')
# Bayes Segnet pooling was disabled (commented out) in the original script;
# to re-enable:
# _save_pool_images('Segnet_Pool.npy', 'Bayes Segnet', _RESULTS_DIR + 'Segnet_Pool_Images/')
_save_pool_images('Variation_Ratio_Pool.npy', 'Variation Ratio', _RESULTS_DIR + 'Variation_Ratio_Images/')
_save_pool_images('Max_Entropy_Pool.npy', 'Max Entropy', _RESULTS_DIR + 'Max_Entropy_Images/')
_save_pool_images('Random_Pool.npy', 'Random Acquisition', _RESULTS_DIR + 'Random_Images/')
|
[
"riashat.islam.93@gmail.com"
] |
riashat.islam.93@gmail.com
|
eee47352250b1354c790e2f7624fae5c7205dbdd
|
d45b87ba22649cb9c0f003479112c50a7ce09ba0
|
/Counting Sort 3.py
|
65bd53aba0bb44a886e5ed534ec574b1d9fdc902
|
[] |
no_license
|
chishui/HackerRankAlgorithmsChallenge
|
7458f6553f52846b9de5b68c0f692f72be13dfa8
|
611096a0c362675ce68598065ea3fe0abbbe5b99
|
refs/heads/master
| 2020-12-24T13:35:43.829308
| 2014-09-02T10:36:57
| 2014-09-02T10:36:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
#https://www.hackerrank.com/challenges/countingsort3
N = input()
li = [int(raw_input().strip().split()[0]) for i in range(0, N)]
li.sort()
last = -1
index = 0
out = []
for i in range(0, 100):
while index < len(li) and i >= li[index] :
index = index + 1
out.append(index)
print ' '.join(map(str, out))
|
[
"chishui2@gmail.com"
] |
chishui2@gmail.com
|
79e89da491df1b01cf2db1375aa85bf04472dfce
|
f29a31354a66798e2c398fc2a01bc285b6e35dfb
|
/NeuralNetworks/l-IntroToNeuralNetworks/Perceptrons.py
|
8b97e96224a7febd95bb5ca02c32f3a2c2cb5e9d
|
[] |
no_license
|
ajpiter/UdacityDeepLearning
|
2fd8b6ba7f29aa03ab9dfdd557dbdcc692e7ada0
|
eb343a8be223f4bcc15a87483f7945023c2c9a0e
|
refs/heads/master
| 2021-01-02T09:00:34.221125
| 2017-08-28T16:32:45
| 2017-08-28T16:32:45
| 99,121,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
#Perceptrons
#Also known as neurons
#Inputs
#Weights
#Start out as random values, then as the neural network learns more about the input data and results the network adjusts the weights
#The process of adjusting the weights is called training the neural network
#The higher the weight the more important it is in determining the output
# 'W' represents a matrix of weights
# 'w' represents an individual weight
#Linear combination
#Multiple weights times inputs and sum them
#Start at i = 1
#Evaluate (w1 * x1) and remember the results
#move to i = 2
#Evaluate (w2 * x2) and add these results to (w1 * x1)
#Continue repeating that process until i = m, where m is the number of inputs
#Example, if we had two inputs, (w1 * x1) + (w2 * x2)
#Output signal
#Done by feeding the linear combination into an activation function
#Activation functions are functions that decide, given the inputs to the node what should be the nodes outputs.
#The output layer is referred to as activations
#Heaviside step function
#An activation function that returns a 0 if the linear combination is less than 0.
#It returns a 1 if the linear combination is positive or equal to zero.
#Think of 1 as yes and 0 as no or True/False
#Bias
#one way to get a function to return 1 for more inputs is to add a value to the results of the linear combination
#Bias is represented in equations as b
#Similar to weights, the bias can be updated and changed by the neural network during training
#weights and bias are initially assigned a random value and then they are updated using a learning algorithm like gradient descent.
#The weights and biases change so that the next training example is more accurate and patterns are learned by the neural network.
|
[
"noreply@github.com"
] |
ajpiter.noreply@github.com
|
72655e0d239fb7752d956948112e58f2ba5f52b8
|
3637fe729395dac153f7abc3024dcc69e17f4e81
|
/reference/ucmdb/discovery/os_platform_discoverer.py
|
02d93f540190842835fd968afa055cc09e7172c3
|
[] |
no_license
|
madmonkyang/cda-record
|
daced6846c2456f20dddce7f9720602d1583a02a
|
c431e809e8d0f82e1bca7e3429dd0245560b5680
|
refs/heads/master
| 2023-06-15T08:16:46.230569
| 2021-07-15T16:27:36
| 2021-07-15T16:27:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,272
|
py
|
# coding=utf-8
'''
Created on Dec 27, 2013
@author: ekondrashev
'''
import logger
import entity
import command
import flow
import post_import_hooks
import service_loader
from service_loader import load_service_providers_by_file_pattern
class Platform(entity.Immutable):
    """Immutable value object identifying an OS platform by name.

    Equality is case-insensitive and accepts either another Platform
    or a plain string; hashing is based on the (case-sensitive) name.
    """
    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        if isinstance(other, Platform):
            other_name = other.name
        elif isinstance(other, basestring):
            other_name = other
        else:
            return NotImplemented
        return self.name.lower() == other_name.lower()

    def __ne__(self, other):
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return equal
        return not equal

    def __key__(self):
        return (self.name, )

    def __hash__(self):
        return hash(self.__key__())

    def __repr__(self):
        return '%s(%s)' % (self.__class__, repr(self.name))
class __PlatformsEnum(entity.Immutable):
    """Registry of known Platform instances with attribute-style access.

    The double-underscore prefix name-mangles both the class name and the
    backing dict (stored as ``_PlatformsEnum__platforms``).
    """
    def __init__(self, **platforms):
        self.__platforms = platforms

    def __getattr__(self, name):
        # Fall back to the registry so ``enum.<platform_name>`` works for
        # any registered platform; anything else raises AttributeError.
        value = self.__platforms.get(name)
        if value:
            return value
        raise AttributeError

    def values(self):
        # All registered Platform instances.
        return self.__platforms.values()

    def by_name(self, name):
        # Case-insensitive lookup (relies on Platform.__eq__ accepting
        # strings); implicitly returns None when the name is unknown.
        for platform in self.values():
            if platform == name:
                return platform

    def merge(self, **platforms):
        # Register additional platforms (called by plugin modules).
        self.__platforms.update(platforms)

# Singleton registry, populated by *_os_platform_discoverer plugins
# loaded in __load_plugins below.
enum = __PlatformsEnum()
class Discoverer(object):
    """Abstract base for OS-platform discoverers.

    Concrete implementations are registered via the service loader and
    selected by is_applicable().
    """

    def is_applicable(self, shell):
        r'''Tell whether this discoverer can be applied against *shell*.

        @types: shellutils.Shell-> bool
        '''
        raise NotImplementedError('is_applicable')

    def get_platform(self, shell):
        r'''Discover the platform behind *shell*.

        @types: shellutils.Shell -> os_platform_discoverer.Platform
        '''
        raise NotImplementedError('get_platform')
def find_discoverer_by_shell(shell):
    r'''Return the first registered discoverer applicable to *shell*.

    @types: shellutils.Shell -> os_platform_discoverer.Discoverer
    @raise ValueError: if shell is not passed
    @raise flow.DiscoveryException: if no os platform discoverer found
    '''
    if not shell:
        raise ValueError('Invalid shell')
    for candidate in service_loader.global_lookup[Discoverer]:
        if candidate.is_applicable(shell):
            return candidate
    raise flow.DiscoveryException('No os platform discoverer '
                                  'implementation found')
def discover_platform_by_shell(shell):
    r'''Find an applicable discoverer for *shell* and run it.

    @types: shellutils.Shell -> os_platform_discoverer.Platform
    @raise ValueError: if shell is not passed
    @raise flow.DiscoveryException: if no os platform discoverer found
        or on platform discovery error
    '''
    discoverer = find_discoverer_by_shell(shell)
    try:
        return discoverer.get_platform(shell)
    # Python 2 except syntax: command execution failures are wrapped into
    # the generic discovery exception so callers handle a single type.
    except command.ExecuteException, e:
        raise flow.DiscoveryException(e)
# Deferred hook: runs once this module itself has finished importing, so the
# plugin modules can safely import it back and call enum.merge().
@post_import_hooks.invoke_when_loaded(__name__)
def __load_plugins(module):
    logger.debug('Loading os platforms')
    # Each matching module registers its Discoverer and Platform entries
    # as an import side effect.
    load_service_providers_by_file_pattern('*_os_platform_discoverer.py')
    logger.debug('Finished loading platforms: %s' % enum.values())
|
[
"silentbalanceyh@126.com"
] |
silentbalanceyh@126.com
|
27fac4f1aaf8414c571f63b38f3416535871b864
|
e7fcc1d64cd95805918ab1b5786bf81a92f973ef
|
/2020/day06/test_day06.py
|
dcfa4fa5d4d7f186a72866d92f905fc5c31bff00
|
[] |
no_license
|
trolen/advent-of-code
|
8145c1e36fea04e53d4b7a885efcc2da71fbfe57
|
0a4e022a6a810d86e044a15036a2f5778f0d38af
|
refs/heads/master
| 2023-02-26T13:11:58.341006
| 2023-02-20T23:22:27
| 2023-02-20T23:22:27
| 54,579,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
#! /usr/bin/env python3
import unittest
import day06
class TestDay06(unittest.TestCase):
    """Exercises the day06 helpers against the puzzle's sample input."""

    def setUp(self):
        # Same fixture as the puzzle statement: five answer groups
        # separated by blank lines.
        sample = 'abc\n\na\nb\nc\n\nab\nac\n\na\na\na\na\n\nb'
        self.raw_data = sample.split('\n')
        self.groups = day06.parse_data(self.raw_data)

    def test_unique_chars(self):
        self.assertEqual('abc', day06.get_unique_chars(['ab', 'ac']))

    def test_common_chars(self):
        self.assertEqual('a', day06.get_common_chars(['ab', 'ac']))

    def test_part1(self):
        self.assertEqual(11, day06.do_part1(self.groups))

    def test_part2(self):
        self.assertEqual(6, day06.do_part2(self.groups))


if __name__ == '__main__':
    unittest.main()
|
[
"timothy.rolen@gmail.com"
] |
timothy.rolen@gmail.com
|
cdb896df7dafbf9b574f7853ffe03b2a0ab849e0
|
5c4cc78698a8cdadb10c45799a67c95ca17a4d5a
|
/custom_components/usage.py
|
f93d2655364330efe4fac2599f2b0bc5244848ee
|
[] |
no_license
|
gitumarkk/dash-custom-components-blog
|
fb044f14735d686bbf0c3e07b863c0eb39830c6b
|
3a94e3fd7e3047eb082be901f2c2962b42b27964
|
refs/heads/main
| 2023-05-31T06:40:33.337975
| 2021-06-11T06:22:31
| 2021-06-11T06:22:31
| 375,925,178
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
import custom_components
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
app = dash.Dash(__name__)

# Layout: the custom component feeding a plain div that echoes its value.
app.layout = html.Div([
    custom_components.MyCustomComponent(
        id='input',
        value='my-value',
        label='my-label'
    ),
    html.Div(id='output')
])

# Re-render the output div whenever the component's ``value`` prop changes.
@app.callback(Output('output', 'children'), [Input('input', 'value')])
def display_output(value):
    return 'You have entered {}'.format(value)

if __name__ == '__main__':
    app.run_server(debug=True)
|
[
"gitumarkk@gmail.com"
] |
gitumarkk@gmail.com
|
191db6f8ca5bb50f81b0a602940e6003d3f27b1b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/4/usersdata/112/1803/submittedfiles/swamee.py
|
0993e51e919e7a77d0aa3da61db48e12e1ca660d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
# Read the pipe-flow parameters (Python 2 input(): evaluates the typed text).
f= input('Digite o valor de f:')
L= input('Digite o valor de L:')
Q= input('Digite o valor de Q:')
DeltaH= input('Digite o valor de Delta H')
v= input('Digite o valor de v')
g= 9.81  # gravitational acceleration [m/s^2]
e= 0.000002  # pipe roughness
# BUGFIX: the original read "(math.pi)**2(g*DeltaH)", which *calls* the
# float pi**2 and raises TypeError at runtime; the missing multiplication
# operator has been inserted.
# NOTE(review): the Swamee diameter formula would normally use Q**2 and an
# exponent (...)**(2/5) rather than a multiplication by (2/5) -- confirm
# the intended formula against the assignment.
D= ((8*f*L*Q)*(2/5))/(((math.pi)**2)*(g*DeltaH))
Rey= (4*Q)/((math.pi)*D*v)
K= (0.25)/(math.log10((e/3.70)+(5.74)/(Rey)**0.9))**2
print('D=%.4f'%D)
print('Rey=%.4f'%Rey)
print('K=%.4f'%K)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
312460ac8bca6af84350206ee751570d59d027a3
|
1a639d185f9c883b7bebf33c577c58b22ac93c7e
|
/graphics/moving_pan.py
|
37ad05954d8428b128a6f418e6a6d7232f4c8994
|
[] |
no_license
|
gofr1/python-learning
|
bd09da5b5850b1533a88b858690ed4380b55d33e
|
19343c985f368770dc01ce415506506d62a23285
|
refs/heads/master
| 2023-09-02T15:42:27.442735
| 2021-11-12T10:17:13
| 2021-11-12T10:17:13
| 237,828,887
| 0
| 0
| null | 2021-11-12T10:17:14
| 2020-02-02T20:03:42
|
Python
|
UTF-8
|
Python
| false
| false
| 767
|
py
|
from superwires import games
path_to_images = '../../Pictures/img/'

# Initialise the game window: 640x480 running at 50 frames per second.
games.init(screen_width = 640, screen_height = 480, fps = 50)

class Pan(games.Sprite):
    '''Pan moving with mouse'''
    def update(self):
        '''Move object to mouse position'''
        # Called by the framework every frame: snap the sprite to the
        # current mouse coordinates.
        self.x = games.mouse.x
        self.y = games.mouse.y
def main():
    """Set up the wall background and mouse-following pan, then run the loop."""
    wall_image = games.load_image(path_to_images + "wall.jpg", transparent=False)
    games.screen.background = wall_image
    pan_image = games.load_image(path_to_images + "PizzaPan.png")
    # Start the pan wherever the mouse currently is.
    the_pan = Pan(
        image = pan_image,
        x = games.mouse.x,
        y = games.mouse.y
    )
    games.screen.add(the_pan)
    games.mouse.is_visible = False # mouse pointer is invisible
    # Blocks until the window is closed.
    games.screen.mainloop()

# go!
main()
|
[
"gofr.one@gmail.com"
] |
gofr.one@gmail.com
|
87610080866d9cad3191923528acbeeed82d6233
|
547548a6ae8db52b1b183d6f3ba3ad63f4247962
|
/train/gen/kl/paths.py
|
5bc1abac26c206f66572dc7988a9abdd8620b8c0
|
[
"MIT"
] |
permissive
|
jeffkrupa/SubtLeNet
|
21870c8cc88080c101edffb414832d863c299455
|
e0e74b7a0a1c76fd6d6e21c80ce57302a2cd6b6f
|
refs/heads/master
| 2022-06-25T16:33:36.427635
| 2022-06-10T16:15:53
| 2022-06-10T16:15:53
| 187,670,116
| 0
| 2
|
MIT
| 2019-08-02T20:26:20
| 2019-05-20T15:44:13
|
Python
|
UTF-8
|
Python
| false
| false
| 288
|
py
|
#basedir = '/fastscratch/snarayan/genarrays/v_deepgen_3/'
#figsdir = '/home/snarayan/public_html/figs/deepgen/v3/'
# Active input/output locations for the v4 KL runs.
basedir = '/data/t3serv014/snarayan/deep//v_deepgen_4_small/'
figsdir = '/home/snarayan/public_html/figs/deepgen/v4_kl/'

from os import system
# Ensure the figure output directory exists.
# NOTE(review): shells out to ``mkdir -p``; os.makedirs(figsdir,
# exist_ok=True) would avoid the shell, but unlike system() it raises on
# failure -- confirm before changing.
system('mkdir -p '+figsdir)
|
[
"sidn@mit.edu"
] |
sidn@mit.edu
|
81a9468e822101750e73217c4b2e6d17f02e75b2
|
7a3fc3ea3dd71e4ec85ac73e0af57ae976777513
|
/.history/flaskblog_20210524215327.py
|
3dc704c3d526e4ab9cbfa1dbab452f4fb649c5bb
|
[] |
no_license
|
khanhdk0000/first_proj
|
72e9d2bbd788d6f52bff8dc5375ca7f75c0f9dd0
|
bec0525353f98c65c3943b6d42727e3248ecfe22
|
refs/heads/main
| 2023-05-12T10:36:08.026143
| 2021-06-05T15:35:22
| 2021-06-05T15:35:22
| 374,148,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from flask import Flask, render_template
app = Flask(__name__)

# Both '/' and '/home' serve the home page.
@app.route('/')
@app.route('/home')
def hello_world():
    return render_template('home.html')

@app.route('/about')
def about():
    # NOTE(review): renders home.html rather than an about template --
    # looks like a placeholder; confirm intended.
    return render_template('home.html')

if __name__ == '__main__':
    app.run(debug=True)
|
[
"khanhtran28092000@gmail.com"
] |
khanhtran28092000@gmail.com
|
40801f7d40c23e8ef9fd71aef06229192814b53d
|
738b6d6ec4572f5848940b6adc58907a03bda6fb
|
/tests/pymcell4_positive/3000_reports_check/model.py
|
03fdbc184e2cc0fea4c8bae504524b1bb53ba093
|
[
"MIT",
"Unlicense",
"LicenseRef-scancode-public-domain"
] |
permissive
|
mcellteam/mcell_tests
|
09cd1010a356e0e07c88d7e044a73c5606c6e51a
|
34d2d967b75d56edbae999bf0090641850f4f4fe
|
refs/heads/master
| 2021-12-24T02:36:24.987085
| 2021-09-24T14:19:41
| 2021-09-24T14:19:41
| 174,733,926
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
#!/usr/bin/env python3
import sys
import os
# Locate the MCell python bindings via the MCELL_PATH environment variable.
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
    sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
    print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
    sys.exit(1)

import mcell as m

# Pull simulation parameters from the BNGL model's parameter block.
params = m.bngl_utils.load_bngl_parameters('test.bngl')

ITERATIONS = int(params['ITERATIONS'])

# ---- load bngl file ----

model = m.Model()

if 'MCELL_DEFAULT_COMPARTMENT_VOLUME' in params:
    # Build a cubic default compartment whose edge length yields the
    # requested volume.
    MCELL_DEFAULT_COMPARTMENT_VOLUME = params['MCELL_DEFAULT_COMPARTMENT_VOLUME']
    MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH = MCELL_DEFAULT_COMPARTMENT_VOLUME**(1.0/3.0)
    default_compartment = m.geometry_utils.create_box(
        'default_compartment', MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH
    )
    model.add_geometry_object(default_compartment)
else:
    MCELL_DEFAULT_COMPARTMENT_EDGE_LENGTH = 1
    default_compartment = None

# Observable output goes under react_data/seed_00001/.
model.load_bngl('test.bngl', './react_data/seed_' + str(1).zfill(5) + '/', default_compartment)

# ---- configuration ----

model.config.total_iterations = ITERATIONS

# Enable the reports whose files are asserted to exist below.
model.notifications.rxn_and_species_report = True

model.initialize()
model.run_iterations(ITERATIONS)
model.end_simulation()

# check that reports exist
assert os.path.exists(os.path.join('reports', 'rxn_report_00001.txt'))
assert os.path.exists(os.path.join('reports', 'species_report_00001.txt'))
assert os.path.exists(os.path.join('reports', 'warnings_report_00001.txt'))
|
[
"ahusar@salk.edu"
] |
ahusar@salk.edu
|
6cb5a5e75a9c8324286d70e2893b91e427710002
|
60cbdf1f9771159f872e632017fa736800784297
|
/Codewars/Find-the-odd-int.py
|
c1567718b97f70ca035fd5cb5332e8b15ddf1595
|
[] |
no_license
|
AG-Systems/programming-problems
|
6ea8c109f04c4d22db6e63fe7b665894c786242a
|
39b2d3546d62b48388788e36316224e15a52d656
|
refs/heads/master
| 2023-04-16T16:59:20.595993
| 2023-04-05T01:25:23
| 2023-04-05T01:25:23
| 77,095,208
| 10
| 3
| null | 2019-10-14T16:16:18
| 2016-12-22T00:03:14
|
Python
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
def find_it(seq):
    """Return the value that occurs an odd number of times in *seq*.

    If several values have odd counts, the first-seen one wins (same as
    the original dict-based version); returns None when no value has an
    odd count. Uses collections.Counter instead of a hand-rolled
    if/else counting loop.
    """
    from collections import Counter
    for value, count in Counter(seq).items():
        if count % 2 != 0:
            return value
"""
def find_it(seq):
for i in seq:
if seq.count(i)%2!=0:
return i
CLEVER SOLUTION
"""
|
[
"noreply@github.com"
] |
AG-Systems.noreply@github.com
|
c41845008487c6093767f0afb04faa5273492412
|
2729fff7cb053d2577985d38c8962043ee9f853d
|
/bokeh/sampledata/tests/test_perceptions.py
|
fd7da155b0a33f279e0b263f36c4dea9d66e1929
|
[
"BSD-3-Clause"
] |
permissive
|
modster/bokeh
|
2c78c5051fa9cac48c8c2ae7345eafc54b426fbd
|
60fce9003aaa618751c9b8a3133c95688073ea0b
|
refs/heads/master
| 2020-03-29T01:13:35.740491
| 2018-09-18T06:08:59
| 2018-09-18T06:08:59
| 149,377,781
| 1
| 0
|
BSD-3-Clause
| 2018-09-19T02:02:49
| 2018-09-19T02:02:49
| null |
UTF-8
|
Python
| false
| false
| 2,212
|
py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.perceptions as bsp
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Public names expected to be exported by bokeh.sampledata.perceptions.
ALL = (
    'numberly',
    'probly',
)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

# Generated test verifying the module's __all__ matches ALL exactly.
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.perceptions", ALL))
@pytest.mark.sampledata
def test_numberly(pd):
    # ``pd`` is the pytest fixture supplying the pandas module.
    import bokeh.sampledata.perceptions as bsp
    assert isinstance(bsp.numberly, pd.DataFrame)
    # check detail for package data
    assert len(bsp.numberly) == 46
@pytest.mark.sampledata
def test_probly(pd):
    # ``pd`` is the pytest fixture supplying the pandas module.
    import bokeh.sampledata.perceptions as bsp
    assert isinstance(bsp.probly, pd.DataFrame)
    # check detail for package data
    assert len(bsp.probly) == 46
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
|
[
"noreply@github.com"
] |
modster.noreply@github.com
|
e55a27dca2368a2b46c0c89ab0d91c9214f68154
|
9f2f386a692a6ddeb7670812d1395a0b0009dad9
|
/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py
|
3189e060d58373250ba776271009bcab004e762b
|
[
"Apache-2.0"
] |
permissive
|
sandyhouse/Paddle
|
2f866bf1993a036564986e5140e69e77674b8ff5
|
86e0b07fe7ee6442ccda0aa234bd690a3be2cffa
|
refs/heads/develop
| 2023-08-16T22:59:28.165742
| 2022-06-03T05:23:39
| 2022-06-03T05:23:39
| 181,423,712
| 0
| 7
|
Apache-2.0
| 2022-08-15T08:46:04
| 2019-04-15T06:15:22
|
C++
|
UTF-8
|
Python
| false
| false
| 3,306
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """IPU op test for paddle.fluid.layers.Print on a tiny conv model."""

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        # Print op is only exercised in fp32 here.
        return False

    def set_data_feed(self):
        # Single NCHW sample; an fp16 copy is kept for the framework's
        # dual-precision runs even though fp16 is disabled above.
        data = np.random.uniform(size=[1, 3, 3, 3]).astype('float32')
        self.feed_fp32 = {"x": data.astype(np.float32)}
        self.feed_fp16 = {"x": data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())
        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

    def set_op_attrs(self):
        # Base case: Print with default attributes; subclasses override.
        self.attrs = {}

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0],
            shape=self.feed_shape[0],
            dtype=self.feed_dtype[0])
        out = paddle.fluid.layers.conv2d(x, num_filters=3, filter_size=3)
        # Op under test: Print is inserted between conv and the loss/fetch.
        out = paddle.fluid.layers.Print(out, **self.attrs)

        if self.is_training:
            loss = paddle.mean(out)
            adam = paddle.optimizer.Adam(learning_rate=1e-2)
            adam.minimize(loss)
            self.fetch_list = [loss.name]
        else:
            self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        self.run_op_test(exec_mode)

    def test(self):
        # Exercise every supported execution mode that isn't skipped.
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
class TestCase1(TestBase):
    # Variant: attach a custom message prefix to the Print op.
    def set_op_attrs(self):
        self.attrs = {"message": "input_data"}
class TestTrainCase1(TestBase):
    """Variant: print in both forward and backward passes while training."""

    def set_op_attrs(self):
        # "forward" : print forward
        # "backward" : print forward and backward
        # "both": print forward and backward
        self.attrs = {"message": "input_data2", "print_phase": "both"}

    def set_training(self):
        self.is_training = True
        self.epoch = 2
# Exercises the full attribute surface of the Print op; disabled because the
# IPU backend does not support these attrs yet.
@unittest.skip("attrs are not supported")
class TestCase2(TestBase):
    def set_op_attrs(self):
        self.attrs = {
            "first_n": 10,
            "summarize": 10,
            "print_tensor_name": True,
            "print_tensor_type": True,
            "print_tensor_shape": True,
            "print_tensor_layout": True,
            "print_tensor_lod": True
        }

if __name__ == "__main__":
    unittest.main()
|
[
"noreply@github.com"
] |
sandyhouse.noreply@github.com
|
847583d24105d7141eccf2797b87b466cbd57b01
|
99f43f4591f63d0c57cd07f07af28c0b554b8e90
|
/python/프로그래머스/직사각형만들기.py
|
16bef1835142eb60413503e1b66a432aafa71fc8
|
[] |
no_license
|
SINHOLEE/Algorithm
|
049fa139f89234dd626348c753d97484fab811a7
|
5f39d45e215c079862871636d8e0306d6c304f7e
|
refs/heads/master
| 2023-04-13T18:55:11.499413
| 2023-04-10T06:21:29
| 2023-04-10T06:21:29
| 199,813,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
def solution(v):
    """Given three corners of an axis-aligned rectangle, return the fourth.

    The missing corner is [x, y] where x is the x-coordinate that appears
    exactly once among the three points, and likewise for y.

    Improvements over the original: the pointless ``sorted(v)`` copy is
    removed (counting is order-independent), the verbose if/else counting
    is replaced by ``dict.get``, and the demo print no longer runs on
    import (wrapped in a main guard).
    """
    x_counts = {}
    y_counts = {}
    for x, y in v:
        x_counts[x] = x_counts.get(x, 0) + 1
        y_counts[y] = y_counts.get(y, 0) + 1
    answer = [next(x for x, cnt in x_counts.items() if cnt == 1),
              next(y for y, cnt in y_counts.items() if cnt == 1)]
    return answer


if __name__ == '__main__':
    # Demo from the problem statement.
    print(solution([[1, 1], [2, 2], [1, 2]]))
|
[
"dltlsgh5@naver.com"
] |
dltlsgh5@naver.com
|
099107a1fc7a937fe06c9e7494308aa4d7f2223e
|
26d030d1a8134f1900d11054dc63c674dc2beec8
|
/main.py
|
0895f7c340f491aad10624532b6215a61944c9a2
|
[
"MIT"
] |
permissive
|
kendricktan/pychip8
|
1ea1259abb61485c0db9bd26dda0201c2369452d
|
c9eb4f950f4546dbad0ca84f1c393d822a925a10
|
refs/heads/master
| 2021-04-27T15:44:17.064807
| 2018-02-23T14:28:13
| 2018-02-23T14:28:13
| 122,475,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from pychip8.emulator import Chip8
if __name__ == '__main__':
    # ROM file is expected in the current working directory.
    rom_name = 'pong.rom'
    chip8 = Chip8(rom_name)
    # Enter the emulator's main loop (blocks until exit).
    chip8.run()
|
[
"kendricktan0814@gmail.com"
] |
kendricktan0814@gmail.com
|
28bf6dde8bb5f2f4f836584daa7697bbbb60659a
|
5679731cee36c537615d285ed72810f4c6b17380
|
/492_ConstructTheRectangle.py
|
864fd723b3b57af7cb42b67c170b150f6a55bac9
|
[] |
no_license
|
manofmountain/LeetCode
|
6b76105190a9b62df65a7b56b6def4120498b9fa
|
718f688b3d316e8c10ef680d9c21ecd518d062f8
|
refs/heads/master
| 2021-01-12T03:41:48.318116
| 2017-07-18T12:35:58
| 2017-07-18T12:35:58
| 78,252,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
import math
class Solution(object):
    def constructRectangle(self, area):
        """
        :type area: int
        :rtype: List[int]

        Return [length, width] with length * width == area, length >= width
        and the smallest possible length - width. Walking down from
        floor(sqrt(area)), the first divisor found is the optimal width.
        """
        width, res = int(math.sqrt(area)), list()
        while width != 0:
            if area % width == 0:
                # Floor division keeps the result an int on Python 3; the
                # original "/" produced a float there (Python-2-only code).
                res.append(area // width)
                res.append(width)
                break
            width -= 1
        return res
|
[
"noreply@github.com"
] |
manofmountain.noreply@github.com
|
0a2e966187b89beb9f8331300b18f5e41d660407
|
69c882c678103b182988fb60d3e898d569980f1c
|
/Day 6/day6prog14.py
|
6b4f901785e52ff819f090084f7e227d01a62b68
|
[] |
no_license
|
gittygupta/stcet-python
|
44be9d91cdd6215879d9f04497214819228821be
|
e77456172746ee76b6e2a901ddb0c3dbe457f82a
|
refs/heads/master
| 2022-03-05T11:37:08.720226
| 2019-12-01T00:56:03
| 2019-12-01T00:56:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
def fact(x):
    """Return x! computed iteratively (1 for x <= 1)."""
    result = 1
    for factor in range(2, x + 1):
        result *= factor
    return result
def perm(n, r):
    # NOTE(review): this evaluates n! / ((n-r)! * r!), which is the
    # *combination* count C(n, r); permutations P(n, r) would be
    # n! / (n-r)!. Confirm which one the exercise actually wants.
    # True division, so the result is a float even when exact.
    return (fact(n)/(fact(n - r) * fact(r)))
# Python 2 script: input() eval's the typed text, so "5" becomes int 5.
# (On Python 3 this would pass strings into fact() and fail.)
n = input("n = ")
r = input("r = ")
print(perm(n, r))
|
[
"noreply@github.com"
] |
gittygupta.noreply@github.com
|
931e52355a9877de357fa0e0b6a602e2de02d64e
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayBossFncInvoiceBatchqueryResponse.py
|
cfe0f6e638e1c8ae763e1e65af4347042e876024
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,739
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.ArInvoiceOpenApiResponse import ArInvoiceOpenApiResponse
class AlipayBossFncInvoiceBatchqueryResponse(AlipayResponse):
    """Response object for the alipay.boss.fnc.invoice.batchquery API.

    Generated-SDK style data holder: one private field plus property pair
    per response attribute, and parse_response_content() to populate the
    fields from the decoded JSON payload.
    """

    def __init__(self):
        super(AlipayBossFncInvoiceBatchqueryResponse, self).__init__()
        self._amt = None            # total amount (MultiCurrencyMoneyOpenApi)
        self._current_page = None   # pagination: current page number
        self._items_page = None     # pagination: items per page
        self._result_set = None     # list of ArInvoiceOpenApiResponse rows
        self._total_items = None    # pagination: total row count
        self._total_pages = None    # pagination: total page count

    @property
    def amt(self):
        return self._amt

    @amt.setter
    def amt(self, value):
        # Accept either a ready model instance or its raw dict form.
        if isinstance(value, MultiCurrencyMoneyOpenApi):
            self._amt = value
        else:
            self._amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
    @property
    def current_page(self):
        return self._current_page

    @current_page.setter
    def current_page(self, value):
        self._current_page = value
    @property
    def items_page(self):
        return self._items_page

    @items_page.setter
    def items_page(self, value):
        self._items_page = value
    @property
    def result_set(self):
        return self._result_set

    @result_set.setter
    def result_set(self, value):
        # Each element may be a model instance or a raw dict; normalize all
        # to ArInvoiceOpenApiResponse. Non-list input is silently ignored.
        if isinstance(value, list):
            self._result_set = list()
            for i in value:
                if isinstance(i, ArInvoiceOpenApiResponse):
                    self._result_set.append(i)
                else:
                    self._result_set.append(ArInvoiceOpenApiResponse.from_alipay_dict(i))
    @property
    def total_items(self):
        return self._total_items

    @total_items.setter
    def total_items(self, value):
        self._total_items = value
    @property
    def total_pages(self):
        return self._total_pages

    @total_pages.setter
    def total_pages(self, value):
        self._total_pages = value

    def parse_response_content(self, response_content):
        # Let the base class decode the envelope, then copy the known keys
        # through the setters above (which handle model conversion).
        response = super(AlipayBossFncInvoiceBatchqueryResponse, self).parse_response_content(response_content)
        if 'amt' in response:
            self.amt = response['amt']
        if 'current_page' in response:
            self.current_page = response['current_page']
        if 'items_page' in response:
            self.items_page = response['items_page']
        if 'result_set' in response:
            self.result_set = response['result_set']
        if 'total_items' in response:
            self.total_items = response['total_items']
        if 'total_pages' in response:
            self.total_pages = response['total_pages']
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
1e47736427d5b29ddbed8c696b895ae76e78410d
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-vm-migration/samples/generated_samples/vmmigration_v1_generated_vm_migration_finalize_migration_async.py
|
9725b31782e691f5713fa20467e00eb66fe54fa1
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,993
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for FinalizeMigration
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-vm-migration
# [START vmmigration_v1_generated_VmMigration_FinalizeMigration_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import vmmigration_v1
async def sample_finalize_migration():
# Create a client
client = vmmigration_v1.VmMigrationAsyncClient()
# Initialize request argument(s)
request = vmmigration_v1.FinalizeMigrationRequest(
migrating_vm="migrating_vm_value",
)
# Make the request
operation = client.finalize_migration(request=request)
print("Waiting for operation to complete...")
response = (await operation).result()
# Handle the response
print(response)
# [END vmmigration_v1_generated_VmMigration_FinalizeMigration_async]
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
c586bd5693c7518eb1d938ce2ad960a01f98d250
|
f95e73867e4383784d6fdd6a1c9fe06cffbfd019
|
/ProjectEuler/p004_Largest_palindrome_product.py
|
5d8c9510aee2afac0a0864fbbfc27608ef991779
|
[] |
no_license
|
linxiaohui/CodeLibrary
|
da03a9ed631d1d44b098ae393b4bd9e378ab38d3
|
96a5d22a8c442c4aec8a064ce383aba8a7559b2c
|
refs/heads/master
| 2021-01-18T03:42:39.536939
| 2018-12-11T06:47:15
| 2018-12-11T06:47:15
| 85,795,767
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import timeit
def PE004():
M=0
for i in range(100,10000):
for j in range(i+1,1000):
k=i*j
#if k==int(str(k)[::-1]) and k>M :
if k>M and k==int(str(k)[::-1]) :
M=k
print M
print timeit.timeit(PE004, number=1)
|
[
"llinxiaohui@126.com"
] |
llinxiaohui@126.com
|
6e3669121fdd67488f4e7ec58aa121cf467f15dc
|
f8ffac4fa0dbe27316fa443a16df8a3f1f5cff05
|
/Regex/Matching_Anything_But_New_Line.py
|
9d1b4802c8910f717e3f7aafecd4dfcb1cc4b4c3
|
[] |
no_license
|
ankitniranjan/HackerrankSolutions
|
e27073f9837787a8af7a0157d95612028c07c974
|
e110c72d3b137cf4c5cef6e91f58a17452c54c08
|
refs/heads/master
| 2023-03-16T19:06:17.805307
| 2021-03-09T16:28:39
| 2021-03-09T16:28:39
| 292,994,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# Your task is to write a regular expression that matches only and exactly strings of form: abc.def.ghi.jkx, where each variable a,b,c,d,e,f,g,h,i,j,k,x can be
# any single character except the newline.
regex_pattern = r"^.{3}\..{3}\..{3}\..{3}$" # Do not delete 'r'.
import re
import sys
test_string = input()
match = re.match(regex_pattern, test_string) is not None
print(str(match).lower())
|
[
"noreply@github.com"
] |
ankitniranjan.noreply@github.com
|
f1852c7da40eb5c08990351bb1c5c7ea3197c233
|
7bfcb91f95d20f1199d54f91c9a095df08b44d83
|
/Backup/Django_Youtube/WebBanHang/user/models.py
|
b2d82ecc75ed2668b3c7dbb54babf9acbad04250
|
[] |
no_license
|
llduyll10/backup
|
bcb09eb632dd0858d515aacb7132d913da4dc24c
|
8849d812566977f9a379d38ee1daa2ef42c02c7f
|
refs/heads/master
| 2023-02-28T11:22:23.831040
| 2021-02-01T17:09:55
| 2021-02-01T17:09:55
| 335,006,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class CustomerUser(AbstractUser):
phone_number = models.CharField(default='', max_length=15)
address = models.CharField(default='', max_length=255)
|
[
"llduyll10@gmail.com"
] |
llduyll10@gmail.com
|
72caec1e57d85a6bf4b606a5228254cf3c680874
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_179/ch25_2020_03_23_14_36_52_247565.py
|
b77b247db72eeb75a6603e8b3a253feeebcab017
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
import math
g = 9.8
def calcula_distancia (velocidade, angulo):
angulo_radianos = math.degrees(angulo)
distancia = (velocidade**2 * math.sin(2*angulo_radianos))/g
return distancia
if distancia < 98:
print ('Muito perto')
elif distancia > 102:
print ('Muito longe')
else:
print ('Acertou!')
|
[
"you@example.com"
] |
you@example.com
|
01aaab4806daf83624fce5a5d71e77ac84e3cb95
|
714983fc24c6befe80d426dd94134d09ad2cbdfb
|
/env/lib/python3.6/site-packages/RestAuth/Services/migrations/0004_delete_old_m2m.py
|
31494a3ab34e3a19585de405f5ad81cb7bb1f511
|
[] |
no_license
|
sachinlokesh05/login-registration-forgotpassword-and-resetpassword-using-django-rest-framework-
|
486354ffb3a397c79afc6cbb290ab1cd637f50ac
|
60769f6b4965836b2220878cfa2e1bc403d8f8a3
|
refs/heads/master
| 2023-01-28T22:19:13.483527
| 2020-01-28T14:07:53
| 2020-01-28T14:07:53
| 233,223,694
| 3
| 0
| null | 2023-01-07T22:10:06
| 2020-01-11T11:49:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,682
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field hosts on 'Service'
db.delete_table('Services_service_hosts')
def backwards(self, orm):
# Adding M2M table for field hosts on 'Service'
db.create_table('Services_service_hosts', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('service', models.ForeignKey(orm['Services.service'], null=False)),
('serviceaddress', models.ForeignKey(orm['Services.serviceaddress'], null=False))
))
db.create_unique('Services_service_hosts', ['service_id', 'serviceaddress_id'])
models = {
'Services.service': {
'Meta': {'object_name': 'Service', '_ormbases': ['auth.User']},
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'Services.serviceaddress': {
'Meta': {'object_name': 'ServiceAddress'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '39'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'services': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'hosts'", 'symmetrical': 'False', 'to': "orm['Services.Service']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['Services']
|
[
"sachin.beee.15@acharya.ac.in"
] |
sachin.beee.15@acharya.ac.in
|
52bd5b80c303f7ec03c6a84634f9654784e1fe1c
|
2293c76c3d18e2fcd44ded90bd40113d26285663
|
/pyeccodes/defs/grib2/modelName_def.py
|
ca5790b97b3bf22a70902abdc87628726645d7a4
|
[
"Apache-2.0"
] |
permissive
|
ecmwf/pyeccodes
|
b1f121dbddf68d176a03805ed5144ba0b37ac211
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
refs/heads/master
| 2022-04-23T10:37:40.524078
| 2020-04-18T06:30:29
| 2020-04-18T06:30:29
| 255,554,540
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,990
|
py
|
import pyeccodes.accessors as _
def load(h):
def wrapped(h):
originatingCentre = h.get_l('originatingCentre')
if originatingCentre == 242:
return 'cosmo-romania'
if originatingCentre == 220:
return 'cosmo-poland'
if originatingCentre == 96:
return 'cosmo-greece'
generatingProcessIdentifier = h.get_l('generatingProcessIdentifier')
if originatingCentre == 76 and generatingProcessIdentifier == 235:
return 'cosmo_ru-eps'
if originatingCentre == 76 and generatingProcessIdentifier == 135:
return 'cosmo_ru'
if originatingCentre == 200 and generatingProcessIdentifier == 131:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 46:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 42:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 38:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 34:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 32:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 31:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 148:
return 'cosmo-i2'
if originatingCentre == 200 and generatingProcessIdentifier == 144:
return 'cosmo-i2'
if originatingCentre == 200 and generatingProcessIdentifier == 139:
return 'cosmo-i2'
if originatingCentre == 200 and generatingProcessIdentifier == 36:
return 'cosmo-i2'
subCentre = h.get_l('subCentre')
if subCentre == 250:
return 'cosmo'
if originatingCentre == 250:
return 'cosmo'
return wrapped
|
[
"baudouin.raoult@ecmwf.int"
] |
baudouin.raoult@ecmwf.int
|
bb5ebaf33900bfcc44fdc19ac42207993daeaa5f
|
551d993b15f7e54635cc11d7ed3ee45a2e9aacc6
|
/AAE/Tensorflow_implementation/unsupervised/regularized_z/model.py
|
df4e3fcf6ad90ce669025df91eb33dfbcfbcb10a
|
[
"MIT"
] |
permissive
|
hendrikTpl/GAN_models
|
6185a3c112a8b45205bdd4c556164b6153fbec19
|
8234c7f04be39d20fe09f81511b591deab9152a9
|
refs/heads/master
| 2021-10-25T16:52:13.239290
| 2019-04-05T15:28:06
| 2019-04-05T15:28:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,844
|
py
|
from component_without_bn import *
class Object:
pass
def build_graph(is_test=False):
# Inputs
images = tf.placeholder(dtype=tf.float32, shape=[None, config.ndim_x])
z_sampler = tf.placeholder(dtype=tf.float32, shape=[None, config.ndim_z])
learning_rate = tf.placeholder(dtype=tf.float32, shape=[])
# Graph
encoder = encoder_x_z
decoder = decoder_z_x
discriminator = discriminator_z
with tf.variable_scope('encoder'):
z_representation = encoder(images)
with tf.variable_scope('decoder'):
reconstruction = decoder(z_representation)
if is_test:
test_handle = Object()
test_handle.x = images
test_handle.z_r = z_representation
test_handle.x_r = reconstruction
return test_handle
probability_fake_sample = discriminator(z_representation)
probability_true_sample = discriminator(z_sampler, reuse=True)
# Loss function
# classification
# 0 -> true sample
# 1 -> generated sample
class_true = tf.ones(shape=(config.batch_size, config.ndim_z / 2), dtype=tf.int32)
class_fake = tf.zeros(shape=(config.batch_size, config.ndim_z / 2), dtype=tf.int32)
loss_discriminator = opt.softmax_cross_entropy(probability_fake_sample, probability_true_sample, class_fake,
class_true)
loss_encoder = opt.softmax_cross_entropy(probability_fake_sample, probability_true_sample,\
class_fake, class_true, for_generator=True)
loss_resconstruction = opt.euclidean_distance(images, reconstruction)
# Variables Collection
variables_encoder = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')
variables_decoder = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='decoder')
variables_discriminator = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
# Optimizer
counter_encoder = tf.Variable(trainable=False, initial_value=0, dtype=tf.float32)
counter_resconstruction = tf.Variable(trainable=False, initial_value=0, dtype=tf.float32)
counter_discriminator = tf.Variable(trainable=False, initial_value=0, dtype=tf.float32)
opt_resconstruction = opt.optimize(loss_resconstruction, variables_decoder + variables_encoder,
optimizer=tf.train.AdamOptimizer if config.optimizer_is_adam is True else tf.train.RMSPropOptimizer,
learning_rate=learning_rate, global_step=counter_resconstruction
)
opt_discriminator = opt.optimize(config.scale_ratio * loss_discriminator, variables_discriminator,
optimizer=tf.train.AdamOptimizer if config.optimizer_is_adam is True else tf.train.RMSPropOptimizer,
learning_rate=learning_rate, global_step=counter_discriminator
)
opt_encoder = opt.optimize(config.scale_ratio * loss_encoder, variables_encoder,
optimizer=tf.train.AdamOptimizer if config.optimizer_is_adam is True else tf.train.RMSPropOptimizer,
learning_rate=learning_rate, global_step=counter_encoder
)
# output what we want
graph_handle = Object()
graph_handle.x = images
graph_handle.z = z_sampler
graph_handle.x_ = reconstruction
graph_handle.z_r = z_representation
graph_handle.opt_r = opt_resconstruction
graph_handle.opt_d = opt_discriminator
graph_handle.opt_e = opt_encoder
graph_handle.loss_d = loss_discriminator
graph_handle.loss_e = loss_encoder
graph_handle.loss_r = loss_resconstruction
graph_handle.lr = learning_rate
return graph_handle
|
[
"1019636836@qq.com"
] |
1019636836@qq.com
|
4e84c64706c5b3dcde4f84dc13e6085aa18fa72b
|
61296b98e4d481893db4bc51d75652c7109ae626
|
/0000_examples/cobotta_g.py
|
116d8d398c21d519f84520776dd6e95bfdd43b4d
|
[
"MIT"
] |
permissive
|
Shogo-Hayakawa/wrs
|
23d4560b1062cf103ed32db4b2ef1fc2261dd765
|
405f15be1a3f7740f3eb7d234d96998f6d057a54
|
refs/heads/main
| 2023-08-19T19:29:15.409949
| 2021-11-02T01:22:29
| 2021-11-02T01:22:29
| 423,663,614
| 0
| 0
|
MIT
| 2021-11-02T00:59:17
| 2021-11-02T00:59:17
| null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
import visualization.panda.world as wd
import grasping.planning.antipodal as gp
import robot_sim.end_effectors.grippers.cobotta_gripper.cobotta_gripper as cg
import modeling.collision_model as cm
import modeling.geometric_model as gm
import numpy as np
import math
base = wd.World(cam_pos=np.array([.5, .5, .5]), lookat_pos=np.array([0, 0, 0]))
gm.gen_frame().attach_to(base)
objcm = cm.CollisionModel("objects/holder.stl")
objcm.attach_to(base)
# base.run()
hnd_s = cg.CobottaGripper()
# hnd_s.gen_meshmodel().attach_to(base)
# base.run()
grasp_info_list = gp.plan_grasps(hnd_s,
objcm,
angle_between_contact_normals=math.radians(175),
openning_direction='loc_y',
rotation_interval=math.radians(15),
max_samples=20,
min_dist_between_sampled_contact_points=.001,
contact_offset=.001)
gp.write_pickle_file(objcm_name="holder",
grasp_info_list=grasp_info_list,
file_name="cobg_holder_grasps.pickle")
for grasp_info in grasp_info_list:
jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
hnd_s.grip_at_with_jcpose(jaw_center_pos, jaw_center_rotmat, jaw_width)
hnd_s.gen_meshmodel().attach_to(base)
base.run()
|
[
"wanweiwei07@gmail.com"
] |
wanweiwei07@gmail.com
|
560bbdf2d856311a383f2556ff042c6b24798d81
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp-with-texts/SAF-ENTERPRISE.py
|
25f13b95d7e50531041d277cb4e2ad47bc261ce1
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395
| 2019-08-16T15:51:41
| 2019-08-16T15:53:57
| 237,512,469
| 0
| 0
|
Apache-2.0
| 2020-01-31T20:41:36
| 2020-01-31T20:41:35
| null |
UTF-8
|
Python
| false
| false
| 2,343
|
py
|
#
# PySNMP MIB module SAF-ENTERPRISE (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SAF-ENTERPRISE
# Produced by pysmi-0.3.4 at Wed May 1 14:59:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Integer32, Counter32, Bits, iso, Gauge32, Unsigned32, IpAddress, MibIdentifier, enterprises, TimeTicks, ModuleIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Integer32", "Counter32", "Bits", "iso", "Gauge32", "Unsigned32", "IpAddress", "MibIdentifier", "enterprises", "TimeTicks", "ModuleIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
saf = ModuleIdentity((1, 3, 6, 1, 4, 1, 7571))
if mibBuilder.loadTexts: saf.setLastUpdated('2007040300Z')
if mibBuilder.loadTexts: saf.setOrganization('SAF Tehnika')
if mibBuilder.loadTexts: saf.setContactInfo('SAF Tehnika technical support <techsupport@saftehnika.com>')
if mibBuilder.loadTexts: saf.setDescription('')
tehnika = ObjectIdentity((1, 3, 6, 1, 4, 1, 7571, 100))
if mibBuilder.loadTexts: tehnika.setStatus('current')
if mibBuilder.loadTexts: tehnika.setDescription('Subtree to register SAF tehnika modules')
microwaveRadio = MibIdentifier((1, 3, 6, 1, 4, 1, 7571, 100, 1))
pointToPoint = MibIdentifier((1, 3, 6, 1, 4, 1, 7571, 100, 1, 1))
mibBuilder.exportSymbols("SAF-ENTERPRISE", tehnika=tehnika, PYSNMP_MODULE_ID=saf, microwaveRadio=microwaveRadio, pointToPoint=pointToPoint, saf=saf)
|
[
"dcwangmit01@gmail.com"
] |
dcwangmit01@gmail.com
|
b18c5a2b2afb8aa641c036874755e5247c1d83d0
|
be78d77bea1a5eea2a7f0d4090e1fc138623b79a
|
/cybox/test/objects/link_test.py
|
bac34e34bbbca2617a14995b938c2e2f2505741b
|
[
"BSD-3-Clause"
] |
permissive
|
CybOXProject/python-cybox
|
399f73feb6a54778dca9260b1c0340a3895c6369
|
25e6e8b3a6f429f079d3fbd9ace3db9eb3d5ab71
|
refs/heads/master
| 2020-05-21T19:05:56.725689
| 2020-05-01T13:33:48
| 2020-05-01T13:33:48
| 7,631,169
| 43
| 31
|
BSD-3-Clause
| 2020-05-01T12:41:03
| 2013-01-15T19:04:47
|
Python
|
UTF-8
|
Python
| false
| false
| 980
|
py
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from mixbox.vendor.six import u
from cybox.core import Observables
from cybox.objects.link_object import Link
from cybox.objects.uri_object import URI
from cybox.test.objects import ObjectTestCase
class TestLink(ObjectTestCase, unittest.TestCase):
object_type = "LinkObjectType"
klass = Link
_full_dict = {
'value': u("http://www.example.com"),
'type': URI.TYPE_URL,
'url_label': u("Click Here!"),
'xsi:type': object_type,
}
# https://github.com/CybOXProject/python-cybox/issues/202
def test_correct_namespace_output(self):
link = Link()
link.value = u("https://www.example.com")
xml = Observables(link).to_xml()
self.assertTrue(b"cybox:Properties" in xml)
self.assertTrue(b"LinkObj:Properties" not in xml)
if __name__ == "__main__":
unittest.main()
|
[
"gback@mitre.org"
] |
gback@mitre.org
|
1e8fed92b77867c5a707bc1e8cdaed3ff6f5566b
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/20ed819acd6f85b1facda3b799d3c24b3ada7ad6-<run>-bug.py
|
9d67f4caf81ac18c3daab8feb6cc8736cb5c336a
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
def run(self, terms, variables, **kwargs):
if (not CREDSTASH_INSTALLED):
raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
ret = []
for term in terms:
try:
version = kwargs.pop('version', '')
region = kwargs.pop('region', None)
table = kwargs.pop('table', 'credential-store')
profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
kwargs_pass = {
'profile_name': profile_name,
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'aws_session_token': aws_session_token,
}
val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
except credstash.ItemNotFound:
raise AnsibleError('Key {0} not found'.format(term))
except Exception as e:
raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e.message))
ret.append(val)
return ret
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
00884fcc431f3b0fc1c306f662977b63ebc1c16c
|
743da4642ac376e5c4e1a3b63c079533a5e56587
|
/build/lib.win-amd64-3.6/fairseq/modules/quantization/pq/modules/__init__.py
|
b6881e26bb167f75f55dacfac72238979dd74f80
|
[
"MIT"
] |
permissive
|
tmtmaj/Exploiting-PrLM-for-NLG-tasks
|
cdae1b6e451b594b11d8ecef3c1cd4e12fe51c9b
|
e8752593d3ee881cf9c0fb5ed26d26fcb02e6dd5
|
refs/heads/main
| 2023-06-16T08:26:32.560746
| 2021-07-14T17:50:19
| 2021-07-14T17:50:19
| 371,899,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qconv import PQConv2d # NOQA
from .qlinear import PQLinear # NOQA
from .qemb import PQEmbedding # NOQA
|
[
"qkrwjdgur09@naver.com"
] |
qkrwjdgur09@naver.com
|
52ae3a1a8d1d8f8f7503b9181f015b165f68bf00
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_scrubs.py
|
6efbee2e9b847a91a88e3d43d8c1023f95e3fd07
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
from xai.brain.wordbase.nouns._scrub import _SCRUB
#calss header
class _SCRUBS(_SCRUB, ):
def __init__(self,):
_SCRUB.__init__(self)
self.name = "SCRUBS"
self.specie = 'nouns'
self.basic = "scrub"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9c765fca0194129caa59e74b70cc204fc59bce14
|
cf1e19f7b6354302037bca563b42218df7d79400
|
/최단경로/[2307]도로검문.py
|
3540a2ab6f4b48e1f02290e4e11b12bf476f0669
|
[] |
no_license
|
kim-kiwon/Baekjoon-Algorithm
|
680565ddeced2d44506ae6720cf32d8004db42f8
|
4699e6551d3e7451648b9256c54ea4318b71bd4d
|
refs/heads/master
| 2023-04-13T11:10:21.031969
| 2021-04-26T10:50:08
| 2021-04-26T10:50:08
| 325,209,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,898
|
py
|
#다익스트라 + 경로추적
import heapq
n, m = map(int, input().split())
INF = int(1e9)
graph = [[] for _ in range(n+1)]
previous = [1] * (n+1) #이전 노드 저장
for _ in range(m):
a, b, dist = map(int, input().split())
graph[a].append((b, dist))
graph[b].append((a, dist))
def dijkstra():
distance = [INF] * (n+1)
distance[1] = 0
q = []
q.append((1, 0))
while q:
now, dist = heapq.heappop(q)
if distance[now] < dist:
continue
for i in graph[now]:
cost = dist + i[1]
if cost < distance[i[0]]:
distance[i[0]] = cost
heapq.heappush(q, (i[0], cost))
previous[i[0]] = now
return distance[n]
init_val = dijkstra() #다익스트라 수행. 초기 최단경로 저장.
temp = [] #1->n 까지 최단경로에 거치는 간선들 저장할 리스트.
now = n #n부터 1까지 역순으로 탐지할것.
while True:
if now == 1: break #1까지 탐지 완료시 종료
a = previous[now] #a : 이전노드
b = now #b : 현재노드
for i in graph[now]: #dist = 이전노드 -> 현재노드 거리.
if i[0] == previous[now]:
dist = i[1]
break
temp.append((a, b, dist)) #temp에 이전노드 현재노드 거리 삽입.
now = previous[now]
max_val = -1e9
#최단경로에 사용하는 간선들 없애는게 아니면
#반드시 최단경로 사용할 것이기에 cost변화 없다.
while True:
if len(temp) == 0: break
#최단경로에 사용한 간선 중 하나 삭제 -> 다익스트라로 거리측정 -> 다시 추가
a, b, dist = temp.pop()
graph[a].remove((b, dist))
graph[b].remove((a, dist))
max_val = max(max_val, dijkstra())
graph[a].append((b, dist))
graph[b].append((a, dist))
if max_val >= 1e9:
print(-1)
else:
print(max_val - init_val)
|
[
"76721493+kim-kiwon@users.noreply.github.com"
] |
76721493+kim-kiwon@users.noreply.github.com
|
e1c2fca2bad35624293caa5c903e7e1a37fcb96d
|
e35eb92b5ab6547119585004b9eea3cafe948050
|
/efsw/archive/errors.py
|
3b9ac8626e58cb7513fc221356b582c5bec573f4
|
[] |
no_license
|
einsfr/mmkit
|
0a084db85b2cf5ba268e692676095d768733f387
|
f12bc2f83254a3123e02abdc105816cc04c438b5
|
refs/heads/master
| 2020-12-31T05:56:19.287611
| 2016-06-10T05:56:58
| 2016-06-10T05:56:58
| 29,473,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
ITEM_LINK_SELF_SELF = 'Элемент не может иметь связь с самим собой.'
ITEM_LINK_TYPE_UNKNOWN = 'Неизвестный тип связи между элементами: {0}.'
ITEM_NOT_FOUND = 'Элемент(ы) с ID {0} не существует(ют).'
STORAGE_NOT_FOUND = 'Хранилище(а) с ID {0} не существует(ют).'
STORAGE_NOT_ALLOWED_AS_ARCHIVE = 'Хранилище(а) с ID {0} нельзя использовать как архивные.'
CATEGORY_NOT_FOUND = 'Категория(и) с ID {0} не существует(ют).'
|
[
"einsfr@users.noreply.github.com"
] |
einsfr@users.noreply.github.com
|
9084c5e743b26571e62ba65a4df2d3ec5e68700c
|
a3972cb6ba32abd18b374975f4abd5318bc95f09
|
/project/src/yosigy/api/yosigy_list_views.py
|
960d32f8f54604c94ee00262c81979094695a2d5
|
[] |
no_license
|
ssr03/MiniDelivery
|
c57bb45e497cab34787473925663ace46dbb6b2d
|
659d9757d1f369a6713aa5a66bab2aa5d6381b8e
|
refs/heads/master
| 2020-07-30T15:05:01.401229
| 2019-09-23T11:52:51
| 2019-09-23T11:52:51
| 210,267,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,099
|
py
|
import enum
from datetime import datetime
from django.core.paginator import Paginator
from django.db.models import F, Count
from django.http import JsonResponse
from django.views.generic.base import View
from accounts.mixins import LoginRequiredMixin
from restaurant.api.views import CategoryNum
from yosigy.models import Yosigy
class YosigyListInfo(enum.IntEnum):
POST_TO_SHOW_IN_ONE_PAGE = 4
PAGES_TO_SHOW = 3
class YosigyListAPIView(LoginRequiredMixin, View):
    """Paginated JSON listing of active group-purchase (Yosigy) events.

    URL kwargs:
        category_id: restaurant category to filter by; empty or
            ``CategoryNum.ALL_ID`` means all categories.
        page: 1-based page number (assumed to be an int URL converter).
    GET params:
        tab_value: '' or 'all' for default ordering, 'deadline' to sort
            by the event deadline.
    """

    def get(self, request, *args, **kwargs):
        category_id = kwargs['category_id']
        today = datetime.now().date()
        tab_value = request.GET.get('tab_value', '')
        # Bug fix: the original only set ``self.page`` when the kwarg was
        # truthy, so a falsy page crashed ``yosigy_paginator`` with
        # AttributeError.  Default to the first page instead.
        self.page = kwargs['page'] or 1

        yosigy = list(self._active_yosigy_queryset(today, category_id))
        self._attach_set_menu_counts(yosigy)

        if not yosigy:
            json_data = {
                'message': '아직 공동 구매할 수 있는 메뉴가 없습니다.',
            }
        elif tab_value == 'deadline':
            yosigy.sort(key=lambda menu: menu['yosigy_deadline'])
            json_data = self.yosigy_paginator(yosigy)
            json_data['deadline'] = True
        elif tab_value == 'all' or tab_value == '':
            json_data = self.yosigy_paginator(yosigy)
            json_data['all'] = True
        else:
            # Unknown tab values previously fell through with an empty
            # payload; keep that behavior, but make it explicit.
            json_data = {}
        return JsonResponse(
            json_data
        )

    def _active_yosigy_queryset(self, today, category_id):
        """Return not-yet-expired yosigy rows, optionally category-filtered.

        The two branches of the original differed only in the category
        filter, so they are merged here.
        """
        filters = {
            'restaurant__is_yosigy': True,
            'deadline__gte': today,
        }
        if category_id and category_id != CategoryNum.ALL_ID:
            filters['restaurant__category__pk'] = category_id
        return (
            Yosigy.objects
            .select_related('restaurant')
            .prefetch_related('yosigymenu_set')
            .filter(**filters)
            .values(
                'restaurant',
            )
            .annotate(
                is_yosigy_count=Count('yosigymenu__menu'),
            )
            .values(
                'pk',
                'is_yosigy_count',
                restaurant_title=F('restaurant__title'),
                restaurant_img=F('restaurant__img'),
                yosigy_deadline=F('deadline'),
                yosigy_notice=F('notice'),
            )
            .order_by('-created_time')
        )

    def _attach_set_menu_counts(self, yosigy):
        """Annotate each row (in place) with its number of set menus."""
        yosigy_set = (
            Yosigy.objects
            .select_related('restaurant')
            .prefetch_related('yosigymenu_set')
            .filter(yosigymenu__menu__is_set_menu=True)
            .annotate(
                is_set_menu_count=Count('yosigymenu__menu'),
            )
            .values(
                'is_set_menu_count',
                'pk',
            )
        )
        # Quadratic match on pk, as in the original; both sequences are a
        # single page of rows, so this is acceptable.
        for row in yosigy:
            for set_row in yosigy_set:
                if row['pk'] == set_row['pk']:
                    row['is_set_menu_count'] = set_row['is_set_menu_count']

    def yosigy_paginator(self, yosigy):
        """Build the pagination payload for ``self.page``."""
        paginator = Paginator(yosigy, YosigyListInfo.POST_TO_SHOW_IN_ONE_PAGE)
        current_page = paginator.get_page(self.page)
        # First page number of the PAGES_TO_SHOW-wide window containing the
        # current page.
        start = (self.page - 1) // YosigyListInfo.PAGES_TO_SHOW * YosigyListInfo.PAGES_TO_SHOW + 1
        end = start + YosigyListInfo.PAGES_TO_SHOW
        last_page = len(paginator.page_range)
        if last_page < end:
            end = last_page
        yosigy_list = current_page.object_list
        page_range = range(start, end + 1)
        yosigy_list_data = {
            'yosigy_list': yosigy_list,
            'current_page': {
                'has_previous': current_page.has_previous(),
                'has_next': current_page.has_next(),
            },
            'page_range': [page_range[0], page_range[-1]],
        }
        if current_page.has_previous():
            yosigy_list_data['current_page']['previous_page_number'] = current_page.previous_page_number()
        if current_page.has_next():
            yosigy_list_data['current_page']['next_page_number'] = current_page.next_page_number()
        return yosigy_list_data
|
[
"43363127+ssr03@users.noreply.github.com"
] |
43363127+ssr03@users.noreply.github.com
|
0f41b4c555162561f877240887369c044b1fe898
|
3d589d1c56b55fbd2b45b03564b8a9442ebf142b
|
/lib/src/klio/metrics/base.py
|
1b50aeb1da57930cc8fba17042c72434460c2eb4
|
[
"Apache-2.0"
] |
permissive
|
spotify/klio
|
1aff27412e92c9d699259e5ab1eaeb39dc3e9571
|
e625565708ed846201d2e05f782c0ce585554346
|
refs/heads/develop
| 2023-05-25T14:33:28.348335
| 2022-03-23T20:34:09
| 2022-03-23T20:34:09
| 285,928,366
| 815
| 57
|
Apache-2.0
| 2023-05-24T21:07:09
| 2020-08-07T22:02:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,765
|
py
|
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base classes from which a metrics consumer (i.e. ffwd, logger, etc.)
will need to implement.
New consumers are required to implement the :class:`AbstractRelayClient`, and
three metrics objects based off of :class:`BaseMetric`: a counter, a gauge, and
a timer.
"""
import abc
import six
class _DummyAttribute(object):
    """Placeholder returned by :func:`abstract_attr` when it is used as a
    bare class-level attribute (``FOO_ATTR = abstract_attr()``) rather than
    as a decorator on a property method.  Its only job is to carry the
    ``__isabstractattr__`` marker."""
    # for the ability to do `FOO_ATTR = abstract_attr()` as well as
    # decorate a property method
    pass
def abstract_attr(obj=None):
    """Mark an attribute or a ``@property`` method as abstract.

    Supports plain class-level attributes as well as methods defined as
    a ``@property``.

    Usage:

    .. code-block:: python

        class Foo(object):
            my_foo_attribute = abstract_attr()

            @property
            @abstract_attr
            def my_foo_property(self):
                pass

    Args:
        obj (callable): object to "decorate", i.e. a class method.  When
            omitted, a dummy placeholder is created just to carry the
            ``__isabstractattr__`` marker (mirroring
            ``__isabstractmethod__`` from ``abc.abstractmethod``).

    Returns the object with ``__isabstractattr__`` set to ``True``.
    """
    target = obj if obj else _DummyAttribute()
    target.__isabstractattr__ = True
    return target
def _has_abstract_attributes_implemented(cls, name, bases):
"""Verify a given class has its abstract attributes implemented."""
for base in bases:
abstract_attrs = getattr(base, "_klio_metrics_abstract_attributes", [])
class_attrs = getattr(cls, "_klio_metrics_all_attributes", [])
for attr in abstract_attrs:
if attr not in class_attrs:
err_str = (
"Error instantiating class '{0}'. Implementation of "
"abstract attribute '{1}' from base class '{2}' is "
"required.".format(name, attr, base.__name__)
)
raise NotImplementedError(err_str)
def _get_all_attributes(clsdict):
return [name for name, val in six.iteritems(clsdict) if not callable(val)]
def _get_abstract_attributes(clsdict):
return [
name
for name, val in six.iteritems(clsdict)
if not callable(val) and getattr(val, "__isabstractattr__", False)
]
class _ABCBaseMeta(abc.ABCMeta):
    """Metaclass enforcing implementation of abstract *attributes*.

    ``__new__`` records which entries of the class body are abstract
    (``_klio_metrics_abstract_attributes``) and which attributes exist at
    all (``_klio_metrics_all_attributes``); ``__init__`` then verifies that
    every abstract attribute inherited from a base class has been
    implemented, raising ``NotImplementedError`` otherwise.
    """

    def __init__(cls, name, bases, clsdict):
        _has_abstract_attributes_implemented(cls, name, bases)
        # Bug fix: the original skipped the superclass initializer;
        # ABCMeta (via type) should always get a chance to initialize.
        super(_ABCBaseMeta, cls).__init__(name, bases, clsdict)

    def __new__(metaclass, name, bases, clsdict):
        # Record both attribute inventories on the class itself so
        # ``__init__`` (and subclasses' __init__) can check them.
        clsdict[
            "_klio_metrics_abstract_attributes"
        ] = _get_abstract_attributes(clsdict)
        clsdict["_klio_metrics_all_attributes"] = _get_all_attributes(clsdict)
        cls = super(_ABCBaseMeta, metaclass).__new__(
            metaclass, name, bases, clsdict
        )
        return cls
class AbstractRelayClient(six.with_metaclass(_ABCBaseMeta)):
    """Interface that every metrics relay client must implement.

    A relay client adapts Klio metrics to one concrete consumer
    (e.g. ffwd, logging-based metrics).

    Attributes:
        RELAY_CLIENT_NAME (str): must match the key in ``klio-job.yaml``
            under ``job_config.metrics``.
    """

    RELAY_CLIENT_NAME = abstract_attr()

    def __init__(self, klio_config):
        self.klio_config = klio_config

    @abc.abstractmethod
    def unmarshal(self, metric):
        """Convert the given metric object into a plain dictionary."""
        pass

    @abc.abstractmethod
    def emit(self, metric):
        """Send the given metric object to the consumer.

        Runs in a threadpool separate from the transform; any errors
        raised here are logged and otherwise ignored.
        """
        pass

    @abc.abstractmethod
    def counter(self, name, value=0, transform=None, **kwargs):
        """Create and return a consumer-specific counter metric.

        Callers cache the returned object in memory, so each call should
        produce a fresh instance.
        """
        pass

    @abc.abstractmethod
    def gauge(self, name, value=0, transform=None, **kwargs):
        """Create and return a consumer-specific gauge metric.

        Callers cache the returned object in memory, so each call should
        produce a fresh instance.
        """
        pass

    @abc.abstractmethod
    def timer(self, name, transform=None, **kwargs):
        """Create and return a consumer-specific timer metric.

        Callers cache the returned object in memory, so each call should
        produce a fresh instance.
        """
        pass
class BaseMetric(object):
    """Common state shared by every metric type.

    A consumer must provide a counter, a gauge, and a timer metric, all
    built on this class.
    """

    def __init__(self, name, value=0, transform=None, **kwargs):
        # Extra keyword arguments are accepted (and ignored) so consumer
        # subclasses may take additional options without breaking callers.
        self.name, self.value, self.transform = name, value, transform

    def update(self, value):
        """Replace the current metric value with *value*."""
        self.value = value
|
[
"lynn@spotify.com"
] |
lynn@spotify.com
|
2682ec078d2d665c54515022a6840ddf88168001
|
7a1f6f1aae43b219cd34c3c9b907923fb839e6f5
|
/Python/Udemy/FXTRADE/pyfxtrading/pyfxtrading/28/app/controllers/webserver.py
|
bbf2ff35ce8221762754b16b7b6dd096ee8484a4
|
[] |
no_license
|
amanoman/amanoman.github.io
|
b5afc80e0e49ed15db793e2ebf69003c05ab8ce0
|
141c928f6d1df0389859f663f6439d327d4c32d6
|
refs/heads/master
| 2023-05-28T07:22:09.735409
| 2021-03-31T15:00:14
| 2021-03-31T15:00:14
| 187,139,297
| 0
| 1
| null | 2023-05-22T23:37:24
| 2019-05-17T03:19:36
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 543
|
py
|
from flask import Flask
from flask import render_template
import settings
app = Flask(__name__, template_folder='../views')
@app.teardown_appcontext
def remove_session(ex=None):
    """Remove the scoped SQLAlchemy session when the app context tears down.

    The import is deliberately local to avoid a circular import between
    the web server module and the models package.
    """
    from app.models.base import Session
    Session.remove()
@app.route('/')
def index():
    """Render the landing page template."""
    app.logger.info('index')
    context = {'word': 'World'}
    return render_template('./google.html', **context)
def start():
    """Run the Flask development server, listening on all interfaces."""
    app.run(host='0.0.0.0', port=settings.web_port, threaded=True)
|
[
"amntkykblog@gmail.com"
] |
amntkykblog@gmail.com
|
2dd09cf0b1134b3972740048402bc6e9ee1c97be
|
1ece1faa638f85c567fdb237c67340501f86f89e
|
/model/model_builder.py
|
5bc0acb8d41370c2b1905ff26fb7f1070790eb67
|
[] |
no_license
|
seasa2016/transformer_random
|
54223ee5b04a4563c7903d925436d843b8cf7f1c
|
e3e13c9a2ddc49558d8e991427a974848a850b9c
|
refs/heads/master
| 2020-04-02T12:21:28.167673
| 2019-03-19T03:45:00
| 2019-03-19T03:45:00
| 154,429,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,030
|
py
|
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_,xavier_normal_
from .module.Embedding import Embedding
from .util.Logger import logger
from . import Constant
from . import transformer
def build_embedding(opt, word_dict, max_len, for_encoder=True, dtype='sum', tag=None):
    """Construct an :class:`Embedding` for either side of the model.

    Uses ``opt.src_word_vec_size`` for the encoder side and
    ``opt.tar_word_vec_size`` for the decoder side; the padding index is
    taken from ``Constant.PAD_token``.
    """
    emb_dim = opt.src_word_vec_size if for_encoder else opt.tar_word_vec_size
    return Embedding(
        num_word=len(word_dict),
        max_len=max_len,
        emb_dim=emb_dim,
        feature_dim=emb_dim,
        padding_idx=word_dict[Constant.PAD_token],
        dropout=opt.dropout,
        dtype=dtype,
        tag=tag,
    )
def build_encoder(opt, src_dict, tag_dict):
    """Build the transformer encoder, including its source-side embedding."""
    # Hard-coded maximum sequence length, mirrored in build_decoder.
    max_len = 128
    embedding = build_embedding(opt, src_dict, max_len, tag=tag_dict)
    return transformer.Encoder(
        opt.enc_layer,
        opt.num_head,
        opt.model_dim,
        opt.nin_dim_en,
        opt.dropout,
        embedding,
    )
def build_decoder(opt, tar_dict):
    """Build the transformer decoder, including its target-side embedding."""
    # Hard-coded maximum sequence length, mirrored in build_encoder.
    max_len = 128
    embedding = build_embedding(
        opt, tar_dict, max_len, for_encoder=False, dtype=opt.decode_pos
    )
    return transformer.Decoder(
        opt.dec_layer,
        opt.num_head,
        opt.model_dim,
        opt.nin_dim_de,
        len(tar_dict),
        max_len,
        opt.self_attn_type,
        opt.dropout,
        embedding,
    )
def load_test_model(opt,model_path=None,mode=False):
    """Load vocabularies and a trained checkpoint for evaluation.

    Returns ``(model, opt)`` with the model switched to eval mode.

    Raises:
        ValueError: if neither ``model_path`` nor ``opt.test_from`` is set.
    """
    if model_path is None:
        if(opt.test_from is None):
            raise ValueError('test_from shouble not be None')
        model_path = opt.test_from
    checkpoint = torch.load(model_path)
    # Rebuild the token -> index vocabularies from the subword files.
    data_new = dict()
    for t in ['source','target','tag']:
        data_new[t] = dict()
        with open('./{0}/subword.{1}'.format(opt.data,t)) as f_in:
            for i,word in enumerate(f_in):
                if(t=='source'):
                    # NOTE(review): ``[1:-1]`` presumably strips surrounding
                    # delimiter characters in the source subword file —
                    # confirm against the file format.
                    data_new[t][word.strip()[1:-1]] = i
                else:
                    data_new[t][word.strip()+'_'] = i
    if(mode == False):
        model = build_base_model(checkpoint['opt'],opt, data_new, torch.cuda.is_available(),checkpoint)
    else:
        #build_model_pre(opt,opt,data_ori,data_new,True,checkpoint=checkpoint)
        model = build_base_model(opt,opt,data_new,True,checkpoint=checkpoint)
    # NOTE(review): build_base_model already loads the checkpoint weights
    # when ``checkpoint`` is passed, so this second load is redundant but
    # harmless.
    model.load_state_dict(checkpoint['model'])
    model.eval()
    return model, opt
def build_base_model(model_opt, opt, data_token, gpu, checkpoint=None, dtype=None):
    """Assemble the full transformer, report its size, and either restore
    weights from *checkpoint* or initialize them.

    Args:
        model_opt: options describing the model architecture.
        opt: run-time options (unused here beyond signature compatibility).
        data_token: dict of vocabularies ('source', 'target', optional 'tag').
        gpu (bool): move the model to CUDA when true.
        checkpoint: optional dict with a 'model' state dict to restore.
        dtype: unused; kept for interface compatibility.

    Returns the constructed model on the chosen device.
    """
    if 'tag' in data_token:
        encoder = build_encoder(model_opt, data_token['source'], len(data_token['tag']))
    else:
        encoder = build_encoder(model_opt, data_token['source'], None)
    logger.info("finish build encoder")

    decoder = build_decoder(model_opt, data_token['target'])
    logger.info("finish build decoder")

    device = torch.device("cuda" if gpu else "cpu")
    model = transformer.Transformer(encoder, decoder)

    # Report parameter counts for the whole model and per sub-module.
    n_params = sum(p.nelement() for p in model.parameters())
    enc = 0
    dec = 0
    for name, param in model.named_parameters():
        if 'encoder' in name:
            enc += param.nelement()
        # Bug fix: the original tested ``'decoder' or 'generator' in name``,
        # which is always true, so every non-encoder parameter was counted
        # as decoder regardless of its name.
        elif 'decoder' in name or 'generator' in name:
            dec += param.nelement()
    print("the size will be {0} {1} {2}".format(n_params, enc, dec))

    if checkpoint is not None:
        logger.info('loading model weight from checkpoint')
        model.load_state_dict(checkpoint['model'])
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                if p.requires_grad:
                    p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            # Xavier re-init overrides the uniform init for matrices.
            for p in model.parameters():
                if p.requires_grad and p.dim() > 1:
                    xavier_normal_(p)

    model.to(device)
    logger.info('the model is now in the {0} mode'.format(device))
    return model
def change(model_opt,opt,model,data_new):
    """Swap in a fresh decoder for the new target vocabulary and freeze the
    encoder's word embedding, then re-initialize trainable parameters.

    Mutates ``model_opt`` so it reflects the new decoder hyper-parameters,
    and returns the modified model.
    """
    model.decoder = build_decoder(opt,data_new['target'])
    #update the parameter
    model_opt.tar_word_vec_size = opt.tar_word_vec_size
    model_opt.dropout = opt.dropout
    model_opt.dec_layer = opt.dec_layer
    model_opt.num_head = opt.num_head
    model_opt.model_dim = opt.model_dim
    model_opt.nin_dim_de = opt.nin_dim_de
    model_opt.self_attn_type = opt.self_attn_type
    # NOTE(review): duplicate of the dropout assignment above (harmless).
    model_opt.dropout = opt.dropout
    #lock the grad for the encoder
    # NOTE(review): if ``word_emb`` is an nn.Module (not a Parameter),
    # setting ``requires_grad`` on it has no effect on its weights —
    # ``word_emb.weight.requires_grad = False`` may have been intended;
    # confirm against the Embedding implementation.
    model.encoder.embedding.word_emb.requires_grad = False
    if model_opt.param_init != 0.0:
        for p in model.parameters():
            if(p.requires_grad):
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
    # NOTE(review): unlike build_base_model, this Xavier pass runs
    # unconditionally (no ``param_init_glorot`` check).
    for p in model.parameters():
        if(p.requires_grad):
            if(p.dim()>1):
                xavier_normal_(p)
    if(opt.replace):
        #one for the pretrain model and the other for the new model
        logger.info("with mid layer {0} {1}".format(model_opt.model_dim,opt.model_dim))
        model.mid = nn.Linear(model_opt.model_dim,opt.model_dim)
    return model
def build_model_pre(model_opt, opt, data_ori, data_new, gpu, checkpoint=None):
    """Build a model from a pretrained checkpoint, then swap in a fresh
    decoder for the new target vocabulary (transfer-learning setup).

    Raises:
        ValueError: when no ``checkpoint`` is supplied — this path only
            makes sense on top of a pretrained model.
    """
    # In this work only text inputs are used.
    encoder = build_encoder(model_opt, data_ori['source'], len(data_ori['tag']))
    logger.info("build the origin encoder")
    decoder = build_decoder(model_opt, data_ori['target'])
    logger.info("build the origin decoder")

    device = torch.device("cuda" if gpu else "cpu")
    model = transformer.Transformer(encoder, decoder)
    print(model)

    if(checkpoint):
        logger.info('loading model weight from checkpoint')
        model.load_state_dict(checkpoint['model'])
    else:
        raise ValueError('cant access this mode without using pretrain model')

    # Replace the decoder (and freeze the encoder embedding) for new data.
    model = change(model_opt, opt, model, data_new)

    # Report parameter counts for the whole model and per sub-module.
    n_params = sum(p.nelement() for p in model.parameters())
    enc = 0
    dec = 0
    for name, param in model.named_parameters():
        if 'encoder' in name:
            enc += param.nelement()
        # Bug fix: the original tested ``'decoder' or 'generator' in name``,
        # which is always true, so every non-encoder parameter was counted
        # as decoder regardless of its name.
        elif 'decoder' in name or 'generator' in name:
            dec += param.nelement()
    print("the size will be {0} {1} {2}".format(n_params, enc, dec))

    model.to(device)
    logger.info('the model is now in the {0} mode'.format(device))
    return model
def build_model(model_opt, opt, data_token, checkpoint):
    """Convenience wrapper: build (or restore) the model, using CUDA when
    it is available."""
    logger.info('Building model...')
    use_gpu = torch.cuda.is_available()
    return build_base_model(model_opt, opt, data_token, use_gpu, checkpoint)
|
[
"ericet1234@gmail.com"
] |
ericet1234@gmail.com
|
b51e6caa09f683cec6c8f09fb1aca60e73ec36f0
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_25/models/resource_performance_no_id_by_array_get_response.py
|
dc59892d20f80e34fb26c25e0f59584a263ca562
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 6,240
|
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.25
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_25 import models
class ResourcePerformanceNoIdByArrayGetResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE: auto-generated by swagger-codegen from the FlashArray REST 2.25
    # spec; prefer regenerating over hand-editing behavior.

    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[ResourcePerformanceNoIdByArray]',
        'total': 'list[ResourcePerformanceNoIdByArray]'
    }

    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items',
        'total': 'total'
    }

    required_args = {
    }

    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.ResourcePerformanceNoIdByArray]
        total=None,  # type: List[models.ResourcePerformanceNoIdByArray]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[ResourcePerformanceNoIdByArray]): Performance data, broken down by array. If `total_only=true`, the `items` list will be empty.
            total (list[ResourcePerformanceNoIdByArray]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
        """
        # Only set attributes that were explicitly given; unset fields stay
        # as class-level Property placeholders (see __getattribute__).
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items
        if total is not None:
            self.total = total

    def __setattr__(self, key, value):
        # Reject attributes outside the swagger-declared field set.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Unset swagger fields resolve to class-level ``Property`` markers;
        # surface them as missing attributes instead of returning them.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        # Dict-style read access limited to declared fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        # Dict-style write access limited to declared fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        # Declared field names (whether or not they are currently set).
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ResourcePerformanceNoIdByArrayGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ResourcePerformanceNoIdByArrayGetResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"noreply@github.com"
] |
PureStorage-OpenConnect.noreply@github.com
|
2455804a9deef4d3443589283af4dc9f1ef5c926
|
dd8227454b817ccf2ceb24b3dfd4260d4ded7a72
|
/scripts/item/consume_2435694.py
|
020e445c8c1b62894419c308afa2bc358e797d3f
|
[
"MIT"
] |
permissive
|
Snewmy/swordie
|
0dd3c17808b064c2cb2bd9576b51daf01ae5d686
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
refs/heads/master
| 2023-06-30T21:14:05.225798
| 2021-07-06T14:32:39
| 2021-07-06T14:32:39
| 389,497,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
# Heroes Evan Damage Skin consume script.
# `sm` is the script manager injected by the server's scripting engine.
if sm.addDamageSkin(2435694):
    sm.chat("The Heroes Evan Damage Skin has been added to your account's damage skin collection.")
|
[
"vcalheirosdoc@gmail.com"
] |
vcalheirosdoc@gmail.com
|
12caf078872a5634ca4638aed6dbdbd7776b5062
|
6097031d8e85400214085f152164a29346d106e3
|
/maxheap.py
|
7e3f269c4b2365d3684fe48cdb32ec815206f9cd
|
[] |
no_license
|
ekourkchi/GalaxyGroups
|
2fccca4998850c0838d0c7ef949bba8b1267716a
|
19e98da0015b0462133133a23915e6d633614ad3
|
refs/heads/master
| 2022-04-03T09:30:19.667796
| 2020-02-13T03:05:48
| 2020-02-13T03:05:48
| 112,898,380
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,137
|
py
|
#!/home/ehsan/Ureka/Ureka/variants/common/bin/python
import numpy as np
from math import *
from copy import *
class heapNode:
    # Single max-heap entry (Python 2 code): ``key`` is the ordering
    # priority, ``ID`` an opaque payload identifier, ``flag`` a
    # caller-settable marker (see maxHeap.setFlag).
    key = None
    ID = None
    flag = False
    def __init__(self, key, ID):
        self.key = key
        self.ID = ID
    def toString(self):
        # Python 2 print statement: dumps key, ID and flag on one line.
        print self.key, self.ID, self.flag
# *********************************************
class maxHeap:
    # Array-backed binary max-heap of heapNode entries (Python 2 code).
    # NOTE(review): the class attributes are shadowed by the instance
    # attributes assigned in __init__; only instance state is used.
    size = 0 # Number of current elements
    array = []
    # *****************
    def __init__(self):
        self.size = 0
        self.array = []
    # *****************
    def push(self, key, ID):
        # Append the new node and sift it up until its parent is not smaller.
        newNode = heapNode(key, ID)
        self.array.append(newNode)
        child = self.size
        while child > 0:
            # Parent index in a 0-based heap; Python 2 integer division.
            parent = (child+1)/2-1
            if self.array[child].key > self.array[parent].key:
                self.array[parent], self.array[child] = self.array[child], self.array[parent]
                child = parent
            else:
                break
        self.size+=1
        return 0
    # *****************
    def lrmax(self, left, right):
        # Index of the larger of the two children, or 0 when neither index
        # is inside the heap.  0 is a safe "no child" sentinel because the
        # root (index 0) can never be anyone's child.
        if right <= self.size-1:
            if self.array[left].key >= self.array[right].key:
                return left
            else:
                return right
        elif left <= self.size-1:
            return left
        else:
            return 0
    # *****************
    def pop(self):
        # Remove and return the maximum node; prints a message and returns
        # None when the heap is empty.
        if self.size == 0 :
            print "\n[Error] No elements in the mean Heap ...\n"
            return None
        N = self.size
        output = self.array[0]
        # Move the last node to the root, then sift it down.
        self.array[0] = self.array[N-1]
        parent = 0
        while parent <= N-1:
            left = 2*parent+1
            right = 2*parent+2
            child = self.lrmax(left, right)
            if child != 0:
                if self.array[child].key >= self.array[parent].key:
                    self.array[parent], self.array[child] = self.array[child], self.array[parent]
                    parent = child
                else:
                    break
            else:
                break
        self.array.pop(N-1)
        self.size -= 1
        return output
    # *****************
    def setFlag(self, key):
        # NOTE(review): matches on ``key`` (not ID) and marks every node
        # with that key; returns False only when the heap is empty and
        # implicitly None otherwise — confirm callers rely on neither.
        if self.size == 0 :
            print "\n[Error] No elements in the mean Heap ...\n"
            return False
        for i in range(0, self.size):
            if self.array[i].key == key:
                self.array[i].flag = True
    # *****************
    def peek(self):
        # Return the maximum node without removing it (None when empty).
        if self.size == 0 :
            print "\n[Error] No elements in the mean Heap ...\n"
            return None
        else:
            return self.array[0]
    # *****************
    """
    This method removes heap elements which have the same id as the input ID
    The number of removed elements would be returned
    """
    def remove(self, ID):
        boolean = 0
        if self.size == 0 :
            #print "\n[Error] No elements in the mean Heap ...\n"
            return boolean
        else:
            i = 0
            while i < self.size:
                # ID would be the object ID
                if self.array[i].ID == ID:
                    # Overwrite the match with the last node and sift down,
                    # exactly like pop() but starting at index i.
                    parent = i
                    N = self.size
                    self.array[parent] = self.array[N-1]
                    while parent <= N-1:
                        left = 2*parent+1
                        right = 2*parent+2
                        child = self.lrmax(left, right)
                        if child != 0:
                            if self.array[child].key >= self.array[parent].key:
                                self.array[parent], self.array[child] = self.array[child], self.array[parent]
                                parent = child
                            else:
                                break
                        else:
                            break
                    self.array.pop(N-1)
                    self.size -= 1
                    boolean+=1
                    i-=1 # The new item must be checked again
                i+=1
            return boolean
    # *****************
    def Size(self): return self.size
    # *****************
    def toString(self):
        # Dump every node in array (not sorted) order.
        for i in range(0,self.size):
            self.array[i].toString();
# *********************************************
# *********************************************
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2): build a small heap, remove by ID, then
    # drain it in descending-key order.
    myHeap = maxHeap()
    myHeap.push(4, "e4")
    myHeap.push(7, "e7")
    myHeap.push(2, "e2")
    myHeap.push(6, "e6")
    myHeap.push(8, "e7")
    myHeap.push(5, "e5")
    myHeap.push(3, "e7")
    print "\n", myHeap.Size()
    # remove() matches on ID, so this deletes the node pushed as (5, "e5").
    print myHeap.remove("e5")
    print "\n", myHeap.Size()
    while myHeap.Size()>0:
        myHeap.pop().toString()
    #print myHeap.peek().key
|
[
"ekourkchi@gmail.com"
] |
ekourkchi@gmail.com
|
4e3d52464d257688f122a23748edd43590043b89
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_24/models/network_interface_neighbor_capability.py
|
76ae203e5847eb8a031597b8f2d39119f564eac0
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,245
|
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.24
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_24 import models
class NetworkInterfaceNeighborCapability(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE: auto-generated by swagger-codegen from the FlashArray REST 2.24
    # spec; prefer regenerating over hand-editing behavior.

    swagger_types = {
        'supported': 'bool',
        'enabled': 'bool'
    }

    attribute_map = {
        'supported': 'supported',
        'enabled': 'enabled'
    }

    required_args = {
    }

    def __init__(
        self,
        supported=None,  # type: bool
        enabled=None,  # type: bool
    ):
        """
        Keyword args:
            supported (bool): If true, this capability is supported by this neighbor; false otherwise.
            enabled (bool): If true, this capability is enabled by this neighbor; false otherwise.
        """
        # Only set attributes that were explicitly given; unset fields stay
        # as class-level Property placeholders (see __getattribute__).
        if supported is not None:
            self.supported = supported
        if enabled is not None:
            self.enabled = enabled

    def __setattr__(self, key, value):
        # Reject attributes outside the swagger-declared field set.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Unset swagger fields resolve to class-level ``Property`` markers;
        # surface them as missing attributes instead of returning them.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        # Dict-style read access limited to declared fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        # Dict-style write access limited to declared fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        # Declared field names (whether or not they are currently set).
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(NetworkInterfaceNeighborCapability, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NetworkInterfaceNeighborCapability):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"noreply@github.com"
] |
PureStorage-OpenConnect.noreply@github.com
|
2ecaa0902b36455da6e55c02523cefe6bcec5bfc
|
e5f4c22bfae93d3d96dea1b0ed8f3e4df373243f
|
/test.py
|
f3a74709481a1e1e55a6bdc81b7b3e3e0cf3f866
|
[] |
no_license
|
MrLokans/discover_flask
|
5925a2ab07480398543d51e33c8be2cf23b2c36b
|
63f847409dd67725bdef754cd0041f2647dabf4e
|
refs/heads/master
| 2021-01-10T16:25:21.767911
| 2016-03-07T05:44:17
| 2016-03-07T05:44:17
| 52,816,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
import unittest
from app import app
class AppTestCase(unittest.TestCase):
    """Integration tests for the login/logout flows of the Flask app."""

    def setUp(self):
        self.tester = app.test_client(self)

    # --- helpers -----------------------------------------------------

    def login(self, username, password, follow_redirects=True):
        """POST credentials to the login endpoint."""
        payload = {'username': username, 'password': password}
        return self.tester.post('/login', data=payload,
                                follow_redirects=follow_redirects)

    def logout(self):
        return self.tester.get('/logout', follow_redirects=True)

    def correctly_login(self, follow_redirects=True):
        """Log in with the known-good credentials."""
        return self.login('admin', 'password', follow_redirects)

    # --- tests -------------------------------------------------------

    def test_index(self):
        response = self.tester.get('/login', content_type='html/text')
        self.assertEqual(200, response.status_code)

    def test_login_page_is_loaded(self):
        response = self.tester.get('/login', content_type='html/text')
        self.assertEqual(200, response.status_code)
        body = response.data.decode('utf-8')
        self.assertIn('Please login', body)

    def test_login_process_behaves_correctly_with_correct_creds(self):
        body = self.correctly_login().data.decode('utf-8')
        self.assertIn('Successfully logged in', body)

    def test_login_process_behaves_correctly_with_incorrect_creds(self):
        body = self.login('incorrectuser', 'incorrectpassword').data.decode('utf-8')
        self.assertIn('Invalid username', body)

    def test_logout_works(self):
        self.correctly_login()
        body = self.logout().data.decode('utf-8')
        self.assertIn('Logged out.', body)

    def test_main_page_requires_user_being_logged_in(self):
        response = self.tester.get('/', content_type='html/text',
                                   follow_redirects=True)
        self.assertIn('Login required', response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running this test module directly: ``python test.py``.
    unittest.main()
|
[
"trikster1911@gmail.com"
] |
trikster1911@gmail.com
|
a145346bc456c2281fad96365f8d9a5af1f4cd7d
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/sets_20200609191149.py
|
89a07ef2d3f4c49463319f9699998f9dd296f2fc
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
import json
def Strings(str):
    """Combine "KEY:VALUE" strings by summing the values per key.

    Prints and returns the merged pairs as "K:V,K:V,..." with keys in
    sorted order, e.g. ["Z:1","B:3","C:3","Z:4","B:2"] -> "B:5,C:3,Z:5".

    Note: the parameter shadows the builtin ``str``; the name is kept for
    interface compatibility with existing callers.
    """
    # Bug fix: the original contained an incomplete statement
    # (``values[j][0] =``) that made the whole module a SyntaxError; this
    # implements the behavior described by its own commented-out draft.
    values = {}
    for entry in str:
        key, _, amount = entry.partition(":")
        values[key] = values.get(key, 0) + int(amount)
    finalString = ",".join(
        "{0}:{1}".format(key, json.dumps(values[key])) for key in sorted(values)
    )
    print(finalString)
    return finalString
# Demo invocation from the original scratch file; expected combined
# output for this input is shown below.
Strings(["Z:1","B:3","C:3","Z:4","B:2"])
# "B:5,C:3,Z:5"
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
8a463bda0d0c60cd4f34f3e9d156d3254165acfc
|
ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31
|
/repos/Flask-Large-Application-Example-master/tests/views/test_pypi_packages.py
|
27394594cc76c8ccde073c14c83e1f2757b0f036
|
[
"MIT"
] |
permissive
|
babiato/flaskapp1
|
84de2d0b26a54f5820d3bbe97926782ad41e005c
|
530beb9e3b8516e0e93960b99521c23a523ef546
|
refs/heads/master
| 2023-02-26T16:36:49.760632
| 2021-02-04T09:08:40
| 2021-02-04T09:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,239
|
py
|
from flask import current_app
from flask.ext.celery import CELERY_LOCK
import pytest
from redis.exceptions import LockError
from pypi_portal.extensions import db, redis
from pypi_portal.models.pypi import Package
from pypi_portal.models.redis import POLL_SIMPLE_THROTTLE
from pypi_portal.tasks import pypi
class FakeDelay(object):
    """Stand-in for a Celery async result whose task never reports completion."""
    @staticmethod
    def ready():
        # Always pending, forcing callers down their timeout path.
        return False
def test_index():
    """Smoke test: the PyPI portal index page responds with 200."""
    assert '200 OK' == current_app.test_client().get('/pypi/').status
def test_sync_empty(alter_xmlrpc):
    """Syncing against an empty PyPI index leaves the Package table empty."""
    alter_xmlrpc(set())
    # Clear the rate-limit key so this sync is not throttled.
    redis.delete(POLL_SIMPLE_THROTTLE)
    Package.query.delete()
    db.session.commit()
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    assert [] == db.session.query(Package.name, Package.summary, Package.latest_version).all()
def test_sync_few(alter_xmlrpc):
    """A single package reported by PyPI ends up in the Package table."""
    alter_xmlrpc([dict(name='packageB', summary='Test package.', version='3.0.0'), ])
    redis.delete(POLL_SIMPLE_THROTTLE)
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    expected = [('packageB', 'Test package.', '3.0.0'), ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert expected == actual
def test_sync_rate_limit(alter_xmlrpc):
    """With the throttle key still set, a sync request ingests nothing.

    NOTE(review): relies on the packageB row left behind by test_sync_few —
    these tests are order-dependent; confirm before reordering.
    """
    alter_xmlrpc([dict(name='packageC', summary='Test package.', version='3.0.0'), ])
    # POLL_SIMPLE_THROTTLE is deliberately NOT deleted here.
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    expected = [('packageB', 'Test package.', '3.0.0'), ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert expected == actual
def test_sync_parallel(alter_xmlrpc):
    """While another worker holds the Celery lock, a sync request is a no-op."""
    alter_xmlrpc([dict(name='packageD', summary='Test package.', version='3.0.0'), ])
    redis.delete(POLL_SIMPLE_THROTTLE)
    # Grab the task's distributed lock ourselves to simulate a concurrent run.
    redis_key = CELERY_LOCK.format(task_name='pypi_portal.tasks.pypi.update_package_list')
    lock = redis.lock(redis_key, timeout=1)
    assert lock.acquire(blocking=False)
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    # packageD was not ingested; only the earlier packageB row remains.
    expected = [('packageB', 'Test package.', '3.0.0'), ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert expected == actual
    try:
        lock.release()
    except LockError:
        # The 1-second lock may already have expired on its own.
        pass
def test_sync_many(alter_xmlrpc):
    """Several packages from PyPI are all ingested alongside existing rows."""
    alter_xmlrpc([
        dict(name='packageB1', summary='Test package.', version='3.0.0'),
        dict(name='packageB2', summary='Test package.', version='3.0.0'),
        dict(name='packageB3', summary='Test package.', version='3.0.0'),
        dict(name='packageB4', summary='Test package.', version='3.0.0'),
        dict(name='packageB5', summary='Test package.', version='3.0.0'),
    ])
    redis.delete(POLL_SIMPLE_THROTTLE)
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    # packageB is the leftover from the earlier sync tests.
    expected = [
        ('packageB', 'Test package.', '3.0.0'), ('packageB1', 'Test package.', '3.0.0'),
        ('packageB2', 'Test package.', '3.0.0'), ('packageB3', 'Test package.', '3.0.0'),
        ('packageB4', 'Test package.', '3.0.0'), ('packageB5', 'Test package.', '3.0.0'),
    ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert sorted(expected) == sorted(actual)
def test_sync_unhandled_exception():
    """A bogus THROTTLE value makes the sync view raise instead of redirecting."""
    old_throttle = pypi.THROTTLE
    pypi.THROTTLE = 'nan'
    redis.delete(POLL_SIMPLE_THROTTLE)
    with pytest.raises(ValueError):
        # NOTE(review): `.status()` would TypeError on a str attribute, but the
        # ValueError is presumably raised by the request before status is read
        # — confirm.
        current_app.test_client().get('/pypi/sync').status()
    pypi.THROTTLE = old_throttle
def test_sync_timeout():
    """If the task never becomes ready, the view still redirects and data is unchanged."""
    old_delay = pypi.update_package_list.delay
    # Swap in a stub whose ready() always reports False.
    pypi.update_package_list.delay = FakeDelay
    redis.delete(POLL_SIMPLE_THROTTLE)
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    # Table still holds whatever the earlier sync tests inserted.
    expected = [
        ('packageB', 'Test package.', '3.0.0'), ('packageB1', 'Test package.', '3.0.0'),
        ('packageB2', 'Test package.', '3.0.0'), ('packageB3', 'Test package.', '3.0.0'),
        ('packageB4', 'Test package.', '3.0.0'), ('packageB5', 'Test package.', '3.0.0'),
    ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert sorted(expected) == sorted(actual)
    pypi.update_package_list.delay = old_delay
|
[
"jinxufang@tencent.com"
] |
jinxufang@tencent.com
|
6e242cc43e2c7d24c5cfd1f02e749621f9366a0e
|
0bfb4208bdf7fcfd75311c777e25a3b639bf566d
|
/backend/code/iep/auth/models.py
|
fb6f856736cbe2fd1a25f1dc89baf52a17eff536
|
[
"Apache-2.0"
] |
permissive
|
socek/iep
|
ab7833f94af739abd19f569f28de84cdcc689e95
|
793e35ca5304eef7b7dacb5dd8d486622f497759
|
refs/heads/master
| 2020-05-16T13:48:12.252161
| 2019-12-03T08:28:05
| 2019-12-03T08:28:05
| 183,082,207
| 0
| 0
|
Apache-2.0
| 2019-12-03T08:28:07
| 2019-04-23T19:24:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
from bcrypt import checkpw
from bcrypt import gensalt
from bcrypt import hashpw
from iep.application.model import Model
class User(Model):
    """Application user with bcrypt-hashed credentials."""

    def __init__(
        self,
        uid,
        created_at=None,
        updated_at=None,
        name=None,
        email=None,
        is_admin=None,
        password=None,
    ):
        super().__init__(uid, created_at, updated_at)
        # Profile fields; `password` holds a bcrypt hash, not plain text.
        self.name = name
        self.email = email
        self.is_admin = is_admin
        self.password = password

    def do_password_match(self, password):
        """Return True when *password* matches the stored bcrypt hash."""
        if not self.password:
            # Nothing stored — no password can match.
            return False
        return checkpw(password.encode("utf8"), self.password)

    def set_password(self, password):
        """Hash *password* with a fresh salt and store the result."""
        self.password = hashpw(password.encode("utf8"), gensalt())

    def to_dict(self):
        """Serialize the user to a plain dict (includes the password hash)."""
        return {
            'uid': self.uid,
            'created_at': self.created_at,
            'updated_at': self.updated_at,
            'name': self.name,
            'email': self.email,
            'is_admin': self.is_admin,
            'password': self.password,
        }
|
[
"msocek@gmail.com"
] |
msocek@gmail.com
|
f77ffc69cb16459c8138b3e8578323ac411365e2
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-4/5644d97cc5014e18b14799feeb9b354d528a6489-<test_invalid_interfaces>-bug.py
|
88bcd68511e3ab151bad7e95439f0d409610e661
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
def test_invalid_interfaces(self):
    """Snapshot test: the issue details page renders for an event with invalid interfaces."""
    event = self.create_sample_event(platform='invalid-interfaces')
    self.browser.get('/{}/{}/issues/{}/'.format(self.org.slug, self.project.slug, event.group.id))
    # Wait for the event entries to appear before snapshotting.
    self.browser.wait_until('.entries')
    self.browser.snapshot('issue details invalid interfaces')
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
bb4d3c4ffba8b3fdd9dae18528199a1e9560a1a0
|
43ede7b8fb546c00804c0ef94501f6e48ba170d6
|
/Cursos Python/Python 3 - Solyd/Orientacao_a_objeto.py
|
e902f0d109aa9feef7f8a68a9651bc74a65cd1bb
|
[] |
no_license
|
bopopescu/Python-13
|
db407d17252473e78e705e563cfee4dbd316c6b9
|
c8bef500f2d3e4a63d850f96dfa219eff2ecebda
|
refs/heads/master
| 2022-11-22T16:24:08.490879
| 2020-06-11T14:22:24
| 2020-06-11T14:22:24
| 281,830,055
| 0
| 0
| null | 2020-07-23T02:26:31
| 2020-07-23T02:26:30
| null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
class Cliente:
    """Bank client: name, CPF and age, exposed only via dados_cliente()."""

    def __init__(self, nome, cpf, idade):
        self.__nome = nome
        self.__cpf = cpf
        self.__idade = idade

    def dados_cliente(self):
        """Return the client's personal data as a dict."""
        dados = {}
        dados['nome'] = self.__nome
        dados['cpf'] = self.__cpf
        dados['idade'] = self.__idade
        return dados
class Conta(Cliente):
    """Bank account; stores its own copy of the holder's data plus balance/limit."""

    def __init__(self, nome, cpf, idade, saldo, limite):
        super().__init__(nome, cpf, idade)
        # Holder data is duplicated here because Cliente's attributes are
        # name-mangled and unreachable from this subclass.
        self.__nome = nome
        self.__cpf = cpf
        self.__idade = idade
        # Account state.
        self.__saldo = float(saldo)
        self.__limite = float(limite)

    def saldo_atual(self):
        """Print the current balance."""
        print(f'Saldo atual: R${self.__saldo:.2f}')

    def dono(self):
        """Print the account holder's data."""
        print('nome: ', self.__nome)
        print('cpf:', self.__cpf)
        print('idade :', self.__idade)

    def sacar(self, valor_saque):
        """Withdraw *valor_saque* (no balance/limit check is performed)."""
        self.__saldo = self.__saldo - float(valor_saque)
        print(f'Saque de R${valor_saque}, Realizado com sucesso!')

    def depositar(self, valor_deposito):
        """Add *valor_deposito* to the balance."""
        self.__saldo = self.__saldo + float(valor_deposito)
# Demo: create a client, copy its data into an account, then exercise the
# account operations (prints the balance before/after each change).
cliente = Cliente('Erickson', '19542634-05', 18)
dc = cliente.dados_cliente()
conta = Conta(dc['nome'], dc['cpf'], dc['idade'], 1500.00, 5000.00)
conta.saldo_atual()
conta.sacar(257.05)
conta.saldo_atual()
conta.saldo_atual()
conta.depositar(750.00)
conta.saldo_atual()
|
[
"ofc.erickson@gmail.com"
] |
ofc.erickson@gmail.com
|
ccf9a734c56a27aad1c7b63e96282803ea84b5a4
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/f1dbf6292b80b7cc67661707e7f1d8b5b0a06eb5-<check_params>-bug.py
|
b748d6d0116f380fc5635eaf4ef57ebc08f34ef9
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
def check_params(self):
    """Validate the module's key_id / password parameters.

    On any violation, aborts via self.module.fail_json (which does not return).
    """
    if (not self.key_id.isdigit()):
        self.module.fail_json(msg='Error: key_id is not digit.')
    # key_id must fit in an unsigned 32-bit integer.
    if ((int(self.key_id) < 1) or (int(self.key_id) > 4294967295)):
        self.module.fail_json(msg='Error: The length of key_id is between 1 and 4294967295.')
    if (self.state == 'present'):
        # Length limits differ by auth_type: encrypted vs clear-text password.
        if ((self.auth_type == 'encrypt') and ((len(self.password) < 20) or (len(self.password) > 392))):
            self.module.fail_json(msg='Error: The length of encrypted password is between 20 and 392.')
        elif ((self.auth_type == 'text') and ((len(self.password) < 1) or (len(self.password) > 255))):
            self.module.fail_json(msg='Error: The length of text password is between 1 and 255.')
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
dfdfdc73b69afa83125300340f0252cfe3100d38
|
a127d0feb3bcf4f2581f385bb24f2b789c771c9c
|
/10syo/95_2.py
|
0a1ea7e35fd38d9a0daad78a923622656306fdf5
|
[] |
no_license
|
NgoVanDau/nlp100knock
|
01383e4cc5a1470508744668103b9ea1a238b892
|
3ef63c0d2dfb55c0e6a31aced645f284325a98a5
|
refs/heads/master
| 2023-03-22T13:19:23.932429
| 2018-08-05T05:27:11
| 2018-08-05T05:27:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
fname_input = 'combined_out.tab'
class Data:
    """One word pair's similarity scores: human-judged vs computed."""

    def __init__(self, human_score, my_score):
        self.human_score = human_score
        self.my_score = my_score

    def __repr__(self):
        # Render all instance attributes, e.g. Data{'human_score': ..., ...}.
        return 'Data' + repr(self.__dict__)
# Build the data array from the tab-separated score file.
with open(fname_input) as data_file:
    def read_data():
        # Each line: word1 <TAB> word2 <TAB> human_score <TAB> my_score
        for line in data_file:
            word1, word2, human_score, my_score = line.split('\t')
            yield Data(float(human_score), float(my_score))
    data = list(read_data())
# Assign ranks: position after sorting by each score.
# NOTE(review): tied scores get distinct ranks here (no averaging), which
# deviates from the textbook Spearman treatment of ties — confirm acceptable.
data_sorted_by_human_score = sorted(data, key=lambda data: data.human_score)
for order, d in enumerate(data_sorted_by_human_score):
    d.human_order = order
data_sorted_by_my_score = sorted(data, key=lambda data: data.my_score)
for order, d in enumerate(data_sorted_by_my_score):
    d.my_order = order
# Spearman rank correlation: 1 - 6 * sum(d_i^2) / (N^3 - N).
N = len(data)
total = sum((d.human_order - d.my_order) ** 2 for d in data)
result = 1 - (6 * total) / (N ** 3 - N)
print(result)
|
[
"kota.k.1132.pda@gmail.com"
] |
kota.k.1132.pda@gmail.com
|
796965104f9a8b405aea58339305c0e917d2c247
|
7aae3051a7d08a280f7adc55b4b984bc48c87db3
|
/vehicle/admins/vehicle_model_admin.py
|
ba26d4ec5f9adf2698da8711bc9fa8bd44e5b5a4
|
[] |
no_license
|
ohahlev/ahlev-django-vehicle
|
d087375e3b49cda9253a776f79e4531bbf0a686d
|
51895c200b40be7a298a4054ba2d8945df6a84d0
|
refs/heads/master
| 2020-11-30T07:00:12.441028
| 2020-01-21T01:25:48
| 2020-01-21T01:25:48
| 230,340,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
from django.utils.html import format_html
from django.contrib import admin
from imagekit import ImageSpec
from imagekit.admin import AdminThumbnail
from imagekit.processors import ResizeToFill
from imagekit.cachefiles import ImageCacheFile
from ..models.vehicle_model import VehicleModel
from .widgets import AdminSmallestThumbnailSpec, AdminSmallThumbnailSpec
class VehicleModelAdmin(admin.ModelAdmin):
    """Django admin configuration for VehicleModel with a logo thumbnail preview."""
    def preview_thumbnail(self, obj):
        """Render the logo thumbnail as an <img> tag (None if no thumbnail)."""
        if obj.logo_thumbnail:
            return format_html(u"<img src='{}'/>", obj.logo_thumbnail.url)
    preview_thumbnail.short_description = 'Preview'
    readonly_fields = ['preview_thumbnail']
    fieldsets = [
        ("NAME", {
            'fields': ['name', 'logo', 'preview_thumbnail'],
        }),
    ]
    search_fields = ['name']
    list_display = ['name', 'preview_thumbnail', 'date_created', 'last_updated']
    class Media:
        # Extra stylesheet loaded on this admin's pages.
        css = {
            'all': (
                'vehicle/css/vehicle.css',
            )
        }
    '''
    js = (
        'js/jquery.min.js',
        'js/popper.min.js',
        'js/bootstrap.min.js',
        'js/mdb.min.js',
        'js/myscript.js'
    )
    '''
admin.site.register(VehicleModel, VehicleModelAdmin)
|
[
"ohahlev@gmail.com"
] |
ohahlev@gmail.com
|
19ef56453f855c29a72eaa6c8c52e2ca967e6a36
|
f8e8e365c9cf58b61d72655bc2340baeaed5baff
|
/Leetcode/Python Solutions/Binary Search/ValidPerfectSquare.py
|
c4e8a70a8beb4c70db11315cbe222321332ff181
|
[
"MIT"
] |
permissive
|
Mostofa-Najmus-Sakib/Applied-Algorithm
|
39a69f6b9ed113efe4a420d19cad79e0aa317637
|
bc656fd655617407856e0ce45b68585fa81c5035
|
refs/heads/master
| 2023-08-31T19:54:34.242559
| 2021-11-05T03:43:35
| 2021-11-05T03:43:35
| 412,263,430
| 0
| 0
|
MIT
| 2021-09-30T23:45:29
| 2021-09-30T23:45:25
| null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
"""
LeetCode Problem 367. Valid Perfect Square
Link: https://leetcode.com/problems/valid-perfect-square/
Written by: Mostofa Adib Shakib
Language: Python
Observation:
1) Number less than 2 will always form perfect squares so return True.
2) The number will always be in the first half of the array. Hence, we can discard the second half.
Time Complexity: O(log n)
Space Complexity: O(1)
"""
class Solution:
    def isPerfectSquare(self, num: int) -> bool:
        """Return True iff *num* is a perfect square.

        Binary-searches the candidate root in [2, num // 2]; 0 and 1 are
        perfect squares by definition. O(log num) time, O(1) space.
        """
        if num <= 1:
            return True
        lo, hi = 2, num // 2
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            square = mid * mid
            if square < num:
                lo = mid + 1
            elif square > num:
                hi = mid - 1
            else:
                return True
        return False
|
[
"adibshakib@gmail.com"
] |
adibshakib@gmail.com
|
ed0466956305c5f5e6955a737d43b2039c8f0fc5
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/4-functional-programming/7-list-comprehension_20200422222427.py
|
81d606e197ec10031073a3db9b3879a25cb59bc1
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239
| 2020-04-23T19:18:06
| 2020-04-23T19:18:06
| 253,171,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
# Comprehension warm-ups: building lists from strings and ranges.
my_list = list('HELLO')
print(my_list)

dict_list = [ch for ch in 'good morning']
print(dict_list)

num_list = list(range(0, 100))
print(num_list)

print("divide by 3 with no remainder")
num_list3 = [n for n in range(0, 100) if n % 3 == 0]
print(num_list3)
|
[
"tikana4@yahoo.com"
] |
tikana4@yahoo.com
|
c2ee78250d0f3860d8ec164c11ab88e734704bed
|
8efd8bcd3945d88370f6203e92b0376ca6b41c87
|
/problems100_200/151_Reverse_Words_in_a_String.py
|
11b5357b6300152e2debfd6b3f1328822ffebdd4
|
[] |
no_license
|
Provinm/leetcode_archive
|
732ad1ef5dcdfdde6dd5a33522e86f7e24ae2db5
|
3e72dcaa579f4ae6f587898dd316fce8189b3d6a
|
refs/heads/master
| 2021-09-21T08:03:31.427465
| 2018-08-22T15:58:30
| 2018-08-22T15:58:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
#coding=utf-8
'''
151. Reverse Words in a String
Given an input string, reverse the string word by word.
Example:
Input: "the sky is blue",
Output: "blue is sky the".
Note:
A word is defined as a sequence of non-space characters.
Input string may contain leading or trailing spaces. However, your reversed string should not contain leading or trailing spaces.
You need to reduce multiple spaces between two words to a single space in the reversed string.
Follow up: For C programmers, try to solve it in-place in O(1) space.
'''
class Solution(object):
    def reverseWords(self, s):
        """Reverse the word order of *s*, collapsing runs of spaces.

        :type s: str
        :rtype: str
        """
        # Splitting on a single space yields '' entries for repeated /
        # leading / trailing spaces; dropping them collapses the whitespace.
        tokens = [tok for tok in reversed(s.split(" ")) if tok]
        return " ".join(tokens)
# Quick manual check: leading spaces should be dropped in the output.
s = " the sky is blue"
ss = Solution()
r = ss.reverseWords(s)
print(r)
|
[
"zhouxin@gmail.com"
] |
zhouxin@gmail.com
|
6659b4d8145e55d900dcabb7398db42929c560f4
|
d75560d9acde4f1f6457898d8862b06ba5f8dd7b
|
/backend/msm_sgsjhsjh4803_de_13561/wsgi.py
|
3cd1976c5b367e8dc49ef8d3516ab2cc510980f7
|
[] |
no_license
|
crowdbotics-apps/msm-sgsjhsjh4803-de-13561
|
af6563f775832664041dbd8abc5d05af9d8d4a4f
|
9364d828ffee0edfe68d263fce2b0a7cb2949039
|
refs/heads/master
| 2022-12-29T15:25:29.870944
| 2020-10-19T08:18:12
| 2020-10-19T08:18:12
| 305,263,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
"""
WSGI config for msm_sgsjhsjh4803_de_13561 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'msm_sgsjhsjh4803_de_13561.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
e348b3e0dfab26e0cc1f9c6a114ae59be50476c4
|
4c8755443320f0e8fde2718aec40c49ef27ab6fe
|
/{{cookiecutter.repo_name}}/cookiecutter_repo/utils/loaders.py
|
0d90448aa34fd2244e0f3ef816996b8e56608d99
|
[
"MIT"
] |
permissive
|
ethman/cookiecutter-nussl
|
28266f2b714607493016aa554794617e1cb431aa
|
302df1bee74b13ff0e2c6725997f7b7fa26b32d5
|
refs/heads/master
| 2020-12-09T23:50:09.844838
| 2020-01-12T17:19:06
| 2020-01-12T17:19:06
| 233,449,725
| 0
| 0
| null | 2020-01-12T19:54:48
| 2020-01-12T19:54:47
| null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
from .. import dataset, model
def load_dataset(dataset_class, dataset_folder, dataset_config):
    """Instantiate a dataset by class name looked up on the ``dataset`` package."""
    cls = getattr(dataset, dataset_class)
    return cls(dataset_folder, dataset_config)
def load_model(model_config):
    """Build a model from *model_config*; its 'class' key selects the model type.

    NOTE: pops the 'class' key, so the caller's dict is mutated.
    """
    class_name = model_config.pop('class', 'SeparationModel')
    ModelClass = getattr(model, class_name)
    # SeparationModel additionally receives the package's extra modules.
    if class_name == 'SeparationModel':
        return ModelClass(model_config, extra_modules=model.extras)
    return ModelClass(model_config)
|
[
"prem@u.northwestern.edu"
] |
prem@u.northwestern.edu
|
837d8f52574c6bab972f540869f2bca52b2bf000
|
94c8dd4126da6e9fe9acb2d1769e1c24abe195d3
|
/qiskit/circuit/library/boolean_logic/quantum_or.py
|
0864affb7958edffe5050f3c8d54af82bdc515be
|
[
"Apache-2.0"
] |
permissive
|
levbishop/qiskit-terra
|
a75c2f96586768c12b51a117f9ccb7398b52843d
|
98130dd6158d1f1474e44dd5aeacbc619174ad63
|
refs/heads/master
| 2023-07-19T19:00:53.483204
| 2021-04-20T16:30:16
| 2021-04-20T16:30:16
| 181,052,828
| 1
| 0
|
Apache-2.0
| 2019-06-05T15:32:13
| 2019-04-12T17:20:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,664
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Implementations of boolean logic quantum circuits."""
from typing import List, Optional
from qiskit.circuit import QuantumRegister, QuantumCircuit
from qiskit.circuit.library.standard_gates import MCXGate
class OR(QuantumCircuit):
    r"""A circuit implementing the logical OR operation on a number of qubits.
    For the OR operation the state :math:`|1\rangle` is interpreted as ``True``. The result
    qubit is flipped, if the state of any variable qubit is ``True``. The OR is implemented using
    a multi-open-controlled X gate (i.e. flips if the state is :math:`|0\rangle`) and
    applying an X gate on the result qubit.
    Using a list of flags, qubits can be skipped or negated.
    The OR gate without special flags:
    .. jupyter-execute::
        :hide-code:
        from qiskit.circuit.library import OR
        import qiskit.tools.jupyter
        circuit = OR(5)
        %circuit_library_info circuit
    Using flags we can negate qubits or skip them. For instance, if we have 5 qubits and want to
    return ``True`` if the first qubit is ``False`` or one of the last two are ``True`` we use the
    flags ``[-1, 0, 0, 1, 1]``.
    .. jupyter-execute::
        :hide-code:
        from qiskit.circuit.library import OR
        import qiskit.tools.jupyter
        circuit = OR(5, flags=[-1, 0, 0, 1, 1])
        %circuit_library_info circuit
    """
    def __init__(self, num_variable_qubits: int, flags: Optional[List[int]] = None,
                 mcx_mode: str = 'noancilla') -> None:
        """Create a new logical OR circuit.
        Args:
            num_variable_qubits: The qubits of which the OR is computed. The result will be written
                into an additional result qubit.
            flags: A list of +1/0/-1 marking negations or omissions of qubits.
            mcx_mode: The mode to be used to implement the multi-controlled X gate.
        """
        # store num_variables_qubits and flags
        self.num_variable_qubits = num_variable_qubits
        self.flags = flags
        # add registers
        qr_variable = QuantumRegister(num_variable_qubits, name='variable')
        qr_result = QuantumRegister(1, name='result')
        super().__init__(qr_variable, qr_result, name='or')
        # determine the control qubits: all that have a nonzero flag
        flags = flags or [1] * num_variable_qubits
        control_qubits = [q for q, flag in zip(qr_variable, flags) if flag != 0]
        # determine the qubits that need to be flipped (if a flag is > 0)
        flip_qubits = [q for q, flag in zip(qr_variable, flags) if flag > 0]
        # determine the number of ancillas (depends on the chosen mcx mode)
        self.num_ancilla_qubits = MCXGate.get_num_ancilla_qubits(len(control_qubits), mode=mcx_mode)
        if self.num_ancilla_qubits > 0:
            qr_ancilla = QuantumRegister(self.num_ancilla_qubits, 'ancilla')
            self.add_register(qr_ancilla)
        else:
            qr_ancilla = []
        # OR(x) = NOT(AND(NOT x)): X on the result plus a multi-open-controlled
        # X realized by sandwiching positive-flag controls between X gates.
        self.x(qr_result)
        if len(flip_qubits) > 0:
            self.x(flip_qubits)
        self.mcx(control_qubits, qr_result[:], qr_ancilla[:], mode=mcx_mode)
        if len(flip_qubits) > 0:
            self.x(flip_qubits)
|
[
"noreply@github.com"
] |
levbishop.noreply@github.com
|
a663a571c791506a5bbea2e874df529dbed68ebb
|
c75ec82316ed5322c5844912ce9c528c24360b9f
|
/nsd1907/py02/day01/cut_log.py
|
cceaf977d83d75e82696a61778603e0948c24313
|
[] |
no_license
|
MrZhangzhg/nsd2019
|
a94cde22f2e4bd648bb9e56ca63827f558f3c083
|
54f6d2c7b348a69f13ad5f38f2fbdc8207528749
|
refs/heads/master
| 2021-08-22T17:38:27.697675
| 2020-02-22T08:36:21
| 2020-02-22T08:36:21
| 183,539,489
| 21
| 24
| null | 2020-05-17T12:07:55
| 2019-04-26T02:06:16
|
HTML
|
UTF-8
|
Python
| false
| false
| 525
|
py
|
import time
# Time-window boundaries as struct_time (tuples compare chronologically).
t9 = time.strptime('2019-05-15 09:00:00', '%Y-%m-%d %H:%M:%S')
t12 = time.strptime('2019-05-15 12:00:00', '%Y-%m-%d %H:%M:%S')
# Print every log line whose leading 19-char timestamp falls in [09:00, 12:00].
with open('mylog.txt') as fobj:
    for line in fobj:
        t = time.strptime(line[:19], '%Y-%m-%d %H:%M:%S')
        # Early break assumes the log file is chronologically sorted —
        # TODO confirm; otherwise use the filter-only variant below.
        if t > t12:
            break
        if t >= t9:
            print(line, end='')
# with open('mylog.txt') as fobj:
#     for line in fobj:
#         t = time.strptime(line[:19], '%Y-%m-%d %H:%M:%S')
#         if t9 <= t <= t12:
#             print(line, end='')
|
[
"zhangzg@tedu.cn"
] |
zhangzg@tedu.cn
|
7394010400225008bcf0ebefdea0242ca3765d3e
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_96/1509.py
|
985390ba9c8945569ce4096912a9a40962d7ecaf
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
# Google Code Jam solution: reads cases from B-large.in, writes out.txt.
# NOTE(review): `from string import split` is Python-2-only and appears unused
# (str.split is called on instances below) — confirm before removing.
from string import split
f1=open('B-large.in','r')
f2=open('out.txt','w')
t=int(f1.readline())
for i in range (t):
    k=0  # count of qualifying entries for this case
    s=f1.readline()
    data=list(map(int,s.split(' ')))
    # Presumed layout: data[0]=N items, data[1]=budget of "special" cases,
    # data[2]=threshold, data[3:]=per-item totals — TODO confirm vs. statement.
    u=data[1]+0
    for j in range(data[0]):
        if data[j+3]==0 or data[j+3]==1:
            if data[j+3]>=data[2]:
                k+=1
        elif data[1]==0:
            # No special budget: best single part is ceil(total / 3).
            if data[j+3] % 3==0 and data[j+3]//3>=data[2]:
                k+=1
            elif data[j+3]%3!=0 and data[j+3]//3+1>=data[2]:
                k+=1
        else:
            # Special budget available: some remainders allow one extra point,
            # consuming one unit of u per use.
            if data[j+3]%3==1 and data[j+3]//3+1>=data[2]:
                k+=1
            elif data[j+3]%3==0 and data[j+3]//3+1==data[2] and u!=0:
                u-=1
                k+=1
            elif data[j+3]%3==0 and data[j+3]//3>=data[2]:
                k+=1
            elif data[j+3]%3==2 and data[j+3]//3+2==data[2] and u!=0:
                u-=1
                k+=1
            elif data[j+3]%3==2 and data[j+3]//3+1>=data[2]:
                k+=1
    f2.write ("Case #"+str(i+1)+": "+str(k)+"\n")
f1.close()
f2.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
3a7a4e0fc74d98a3d4bb90e7220f2bca91eaa4d0
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/97/usersdata/239/54617/submittedfiles/lecker.py
|
e3ba12e1ff6dc558621a8f2f17a217e1787cc426
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def lecker (lista):
    """Return True when *lista* contains exactly one "peak".

    A peak is an element strictly greater than its existing neighbour(s);
    the first and last elements compare against their single neighbour only.
    Empty and single-element lists return False.
    """
    # Guard: one element has no neighbour (the original indexed lista[1]
    # here and crashed with IndexError); zero elements trivially has none.
    if len(lista) < 2:
        return False
    cont = 0
    for i in range(len(lista)):  # fixed: original line lacked ')' and ':'
        if i == 0:
            if lista[i] > lista[i+1]:
                cont = cont + 1
        elif i == (len(lista)-1):
            if lista[i] > lista[i-1]:
                cont = cont + 1
        else:
            if lista[i] > lista[i-1]:
                if lista[i] > lista[i+1]:
                    cont = cont + 1
    return cont == 1
# Read two n-element integer lists from stdin and report "S"/"N" for whether
# each has exactly one peak (see lecker above).
n=int(input("Digite a quantidade de elementos da lista:"))
a=[]
for i in range (0,n,1):
    valor=int(input("Digite o valor:"))
    a.append(valor)
b=[]
for i in range (0,n,1):
    valor=int(input("Digite o valor:"))
    b.append(valor)
if lecker (a):
    print("S")
else:
    print("N")
if lecker (b):
    print("S")
else:
    print("N")
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4ae9d4cd17ad18027fa1dffe901e6463804b40c4
|
5db0fab37c2b8a618d85d3b60fab9f806c416474
|
/src/python/pants/backend/python/typecheck/mypy/skip_field.py
|
672a681eeba2e506b35d3c2f51bbadb683934354
|
[
"Apache-2.0"
] |
permissive
|
pantsbuild/pants
|
4988d1ac5474ec95f94ce2218aeb759401e4b011
|
98cbda8545f0d58c586ed2daa76fefd729d5e0d5
|
refs/heads/main
| 2023-09-05T03:44:17.646899
| 2023-09-01T19:52:09
| 2023-09-01T19:52:09
| 7,209,075
| 2,708
| 593
|
Apache-2.0
| 2023-09-14T19:33:33
| 2012-12-17T17:39:04
|
Python
|
UTF-8
|
Python
| false
| false
| 897
|
py
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import (
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestsGeneratorTarget,
PythonTestTarget,
PythonTestUtilsGeneratorTarget,
)
from pants.engine.target import BoolField
class SkipMyPyField(BoolField):
    """`skip_mypy` target field: when true, MyPy skips the target's sources."""
    alias = "skip_mypy"
    default = False
    help = "If true, don't run MyPy on this target's code."
def rules():
    """Register the `skip_mypy` field on every Python target type listed here."""
    return [
        PythonSourcesGeneratorTarget.register_plugin_field(SkipMyPyField),
        PythonSourceTarget.register_plugin_field(SkipMyPyField),
        PythonTestsGeneratorTarget.register_plugin_field(SkipMyPyField),
        PythonTestTarget.register_plugin_field(SkipMyPyField),
        PythonTestUtilsGeneratorTarget.register_plugin_field(SkipMyPyField),
    ]
|
[
"noreply@github.com"
] |
pantsbuild.noreply@github.com
|
1586616caf1191874f3dfdf0a908af9d390cbd3e
|
54eeab2befaa4bf0d96a7bd18110900f8f32c766
|
/other/sql/sqlite.py
|
cc06497fe5586ae73d672cbedf67aa19174a1c04
|
[] |
no_license
|
w8833531/mypython
|
40239ada90426db73444ee54e6e79decc6c9fc9b
|
45ed12a611efd33838766e7bd73840e6d8b73e28
|
refs/heads/master
| 2021-01-19T06:59:09.790525
| 2017-10-18T06:20:43
| 2017-10-18T06:20:43
| 87,513,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# (Python 2 script.) The SQLite driver ships with the standard library, so
# SQLite databases can be used directly: open a Connection, get a Cursor,
# execute SQL through the cursor, then read back the results.
# Import the SQLite driver.
import sqlite3
try:
    # Connect to the SQLite database.
    # The database file is test.db.
    # If the file does not exist it is created in the current directory.
    conn = sqlite3.connect('test.db')
    cursor = conn.cursor()
    # cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
    cursor.execute('insert into user (id, name) values(\'3\', \'Wu\')')
    print cursor.rowcount
except sqlite3.Error as e:
    print e
finally:
    # NOTE(review): if connect() itself fails, `cursor`/`conn` are unbound
    # here and this raises NameError — confirm before relying on the cleanup.
    cursor.close()
    conn.commit()
    conn.close()
# Import the matching driver first, then manipulate data via Connection and
# Cursor objects. Always close both, or resources leak.
try:
    conn = sqlite3.connect('test.db')
    cursor = conn.cursor()
    cursor.execute('select * from user')
    values = cursor.fetchall()
    print values
except sqlite3.Error as e:
    print e
finally:
    cursor.close()
    conn.close()
|
[
"w8833531@hotmail.com"
] |
w8833531@hotmail.com
|
24f4ad0bc75271d08496072c0885072c734d3990
|
5b1ff6054c4f60e4ae7315db9f20a334bc0b7634
|
/Launchkey_MK2/Colors.py
|
6f5028d35ea48a5ef4fb11c613cb1206a59fc846
|
[] |
no_license
|
maratbakirov/AbletonLive9_RemoteScripts
|
2869122174634c75405a965401aa97a2dae924a1
|
4a1517c206353409542e8276ebab7f36f9bbd4ef
|
refs/heads/master
| 2021-06-05T14:38:27.959025
| 2021-05-09T11:42:10
| 2021-05-09T11:42:10
| 13,348,327
| 3
| 4
| null | 2016-10-16T13:51:11
| 2013-10-05T16:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 4,566
|
py
|
#Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Launchkey_MK2/Colors.py
from _Framework.ButtonElement import Color
from .consts import BLINK_LED_CHANNEL, PULSE_LED_CHANNEL
class Blink(Color):
    """Color whose draw() re-sends the value on the blink MIDI channel."""
    def __init__(self, midi_value = 0, *a, **k):
        super(Blink, self).__init__(midi_value, *a, **k)
    def draw(self, interface):
        # Clear the LED first, then send the value on the blink channel.
        interface.send_value(0)
        interface.send_value(self.midi_value, channel=BLINK_LED_CHANNEL)
class Pulse(Color):
    """Color whose draw() re-sends the value on the pulse MIDI channel."""
    def __init__(self, midi_value = 0, *a, **k):
        super(Pulse, self).__init__(midi_value, *a, **k)
    def draw(self, interface):
        # Clear the LED first, then send the value on the pulse channel.
        interface.send_value(0)
        interface.send_value(self.midi_value, channel=PULSE_LED_CHANNEL)
class Rgb:
    """Named palette of Color/Blink/Pulse objects keyed by Launchkey MIDI values."""
    BLACK = Color(0)
    DARK_GREY = Color(1)
    GREY = Color(2)
    WHITE = Color(3)
    RED = Color(5)
    RED_BLINK = Blink(5)
    RED_PULSE = Pulse(5)
    RED_HALF = Color(7)
    ORANGE = Color(9)
    ORANGE_HALF = Color(11)
    AMBER = Color(96)
    AMBER_HALF = Color(14)
    YELLOW = Color(13)
    YELLOW_HALF = Color(15)
    DARK_YELLOW = Color(17)
    DARK_YELLOW_HALF = Color(19)
    GREEN = Color(21)
    GREEN_BLINK = Blink(21)
    GREEN_PULSE = Pulse(21)
    GREEN_HALF = Color(27)
    MINT = Color(29)
    MINT_HALF = Color(31)
    LIGHT_BLUE = Color(37)
    LIGHT_BLUE_HALF = Color(39)
    BLUE = Color(45)
    BLUE_HALF = Color(47)
    DARK_BLUE = Color(49)
    DARK_BLUE_HALF = Color(51)
    PURPLE = Color(53)
    PURPLE_HALF = Color(55)
    DARK_PURPLE = Color(59)
    BRIGHT_PURPLE = Color(81)
    DARK_ORANGE = Color(84)
CLIP_COLOR_TABLE = {15549221: 60,
12411136: 61,
11569920: 62,
8754719: 63,
5480241: 64,
695438: 65,
31421: 66,
197631: 67,
3101346: 68,
6441901: 69,
8092539: 70,
3947580: 71,
16712965: 72,
12565097: 73,
10927616: 74,
8046132: 75,
4047616: 76,
49071: 77,
1090798: 78,
5538020: 79,
8940772: 80,
10701741: 81,
12008809: 82,
9852725: 83,
16149507: 84,
12581632: 85,
8912743: 86,
1769263: 87,
2490280: 88,
6094824: 89,
1698303: 90,
9160191: 91,
9611263: 92,
12094975: 93,
14183652: 94,
16726484: 95,
16753961: 96,
16773172: 97,
14939139: 98,
14402304: 99,
12492131: 100,
9024637: 101,
8962746: 102,
10204100: 103,
8758722: 104,
13011836: 105,
15810688: 106,
16749734: 107,
16753524: 108,
16772767: 109,
13821080: 110,
12243060: 111,
11119017: 112,
13958625: 113,
13496824: 114,
12173795: 115,
13482980: 116,
13684944: 117,
14673637: 118,
16777215: 119}
RGB_COLOR_TABLE = ((0, 0),
(1, 1973790),
(2, 8355711),
(3, 16777215),
(4, 16731212),
(5, 16711680),
(6, 5832704),
(7, 1638400),
(8, 16760172),
(9, 16733184),
(10, 5840128),
(11, 2562816),
(12, 16777036),
(13, 16776960),
(14, 5855488),
(15, 1644800),
(16, 8978252),
(17, 5570304),
(18, 1923328),
(19, 1321728),
(20, 5046092),
(21, 65280),
(22, 22784),
(23, 6400),
(24, 5046110),
(25, 65305),
(26, 22797),
(27, 6402),
(28, 5046152),
(29, 65365),
(30, 22813),
(31, 7954),
(32, 5046199),
(33, 65433),
(34, 22837),
(35, 6418),
(36, 5030911),
(37, 43519),
(38, 16722),
(39, 4121),
(40, 5015807),
(41, 22015),
(42, 7513),
(43, 2073),
(44, 5000447),
(45, 255),
(46, 89),
(47, 25),
(48, 8867071),
(49, 5505279),
(50, 1638500),
(51, 983088),
(52, 16731391),
(53, 16711935),
(54, 5832793),
(55, 1638425),
(56, 16731271),
(57, 16711764),
(58, 5832733),
(59, 2228243),
(60, 16717056),
(61, 10040576),
(62, 7950592),
(63, 4416512),
(64, 211200),
(65, 22325),
(66, 21631),
(67, 255),
(68, 17743),
(69, 2425036),
(70, 8355711),
(71, 2105376),
(72, 16711680),
(73, 12451629),
(74, 11529478),
(75, 6618889),
(76, 1084160),
(77, 65415),
(78, 43519),
(79, 11007),
(80, 4129023),
(81, 7995647),
(82, 11672189),
(83, 4202752),
(84, 16730624),
(85, 8970502),
(86, 7536405),
(87, 65280),
(88, 3931942),
(89, 5898097),
(90, 3735500),
(91, 5999359),
(92, 3232198),
(93, 8880105),
(94, 13835775),
(95, 16711773),
(96, 16744192),
(97, 12169216),
(98, 9502464),
(99, 8609031),
(100, 3746560),
(101, 1330192),
(102, 872504),
(103, 1381674),
(104, 1450074),
(105, 6896668),
(106, 11010058),
(107, 14569789),
(108, 14182940),
(109, 16769318),
(110, 10412335),
(111, 6796559),
(112, 1973808),
(113, 14483307),
(114, 8454077),
(115, 10131967),
(116, 9332479),
(117, 4210752),
(118, 7697781),
(119, 14745599),
(120, 10485760),
(121, 3473408),
(122, 1757184),
(123, 475648),
(124, 12169216),
(125, 4141312),
(126, 11755264),
(127, 4920578))
|
[
"julien@julienbayle.net"
] |
julien@julienbayle.net
|
ab14c4d4a9d8c432ae24647c18b9e98e4968ece0
|
90be755a741d6c93dd59d4acef8b27b4cf93ff54
|
/src/elsia/scripts/get_abs_ori.py
|
8decc0c43e2f8716f8f28629c4b7ed417de7cc24
|
[] |
no_license
|
karry3775/Elsia_ws
|
05aa5786a6f3f64b70c7ceafead6d72d4ca18bab
|
031f8006e9a439d9947be5ed288a666f20fca3a7
|
refs/heads/master
| 2023-02-21T05:21:10.842475
| 2021-01-23T14:58:57
| 2021-01-23T15:21:46
| 326,032,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,405
|
py
|
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from cv_bridge import CvBridge, CvBridgeError
from nav_msgs.msg import Odometry
import cv2
import numpy as np
import math as m
# initialize the node
rospy.init_node("get_abs_ori_node")
# global variables
best_ori_estimate = 0.0  # latest absolute yaw estimate, in radians (printed via m.degrees)
ini_angle_offset = 0.0  # initial yaw offset subtracted from each candidate, in radians
# create publishers
odom_pub = rospy.Publisher("/abs_orientation_odom", Odometry, queue_size=10)  # orientation-only odometry
image_pub = rospy.Publisher("/considered_image", Image, queue_size=10)  # annotated debug image
# global variable for whether to DEBUG or not (enables verbose console prints)
DEBUG = False
def wrap2Pi(theta):
    """Normalize an angle in radians to the principal range (-pi, pi]."""
    # atan2 of (sin, cos) folds any angle back into the principal interval.
    return m.atan2(m.sin(theta), m.cos(theta))
def abs_ori_cb(msg):
    """Image callback: estimate absolute yaw from straight lines in the frame.

    Detects line segments in the cropped camera image, clusters their angles,
    resolves the +-90/180-degree line-direction ambiguity against the previous
    estimate, then publishes the result as an Odometry orientation on
    /abs_orientation_odom plus an annotated image on /considered_image.
    Updates the module-level ``best_ori_estimate`` (radians).
    """
    global best_ori_estimate
    try:
        cv_image = CvBridge().imgmsg_to_cv2(msg, "bgr8")
        # crop out the excess image (keep a 200x200 region)
        cv_image = cv_image[100:300, 100:300, :]
    except CvBridgeError as e:
        print("[INFO]: Error in obtaining image from CvBridge! Skipping frame!")
    else:
        # convert to gray
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        # convert to edges
        edges = cv2.Canny(gray, 50, 150)
        cv2.imshow("edges", edges)
        cv2.waitKey(1)
        # convert to thresholded image
        # NOTE(review): `thresh` is computed but never used below.
        ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
        # extract hough lines (rho=1 px, theta=1 deg, min length 20, max gap 1)
        lines = cv2.HoughLinesP(edges, 1, m.pi/180, 2, None, 20, 1)
        # list of [count, angle] pairs
        cnt_ang_pair = []
        # draw lines
        for i in range(lines.shape[0]):
            for line in lines[i]:
                pt1 = (line[0], line[1])
                pt2 = (line[2], line[3])
                cv2.line(cv_image, pt1, pt2, (255, 0, 0), 3)
                # calculate the segment's angle; stored in degrees
                ang = m.atan2(pt2[1]-pt1[1], pt2[0]-pt1[0])
                cnt_ang_pair.append([1, m.degrees(ang)])
        ###################### show the detected lines ########################
        cv2.imshow("frame", cv_image)
        cv2.waitKey(1)
        #######################################################################
        if len(cnt_ang_pair) != 0:
            # sort the cnt_ang_pair by angle so near-equal angles are adjacent
            cnt_ang_pair.sort(key=lambda x: x[1])
            # bunch up the pairs based on predetermined threshold (degrees)
            ang_thresh_deg = 1
            bunch = [cnt_ang_pair[0]]
            for i in range(1, len(cnt_ang_pair)):
                pairs = cnt_ang_pair[i]
                if abs(pairs[1] - bunch[-1][1]) < ang_thresh_deg:
                    # merge into the current cluster: bump the count and keep
                    # a running mean of the cluster's angle
                    new_count = bunch[-1][0] + 1
                    new_value = (
                        (bunch[-1][1] * (new_count - 1) * 1.0) / new_count) + (pairs[1]*1.0) / new_count
                    bunch[-1] = [new_count, new_value]
                else:
                    # angle gap exceeded the threshold: start a new cluster
                    bunch.append(pairs)
            # sort bunch based on first value i.e. count (largest cluster first)
            bunch.sort(key=lambda x: x[0], reverse=True)
            if DEBUG:
                print("The cnt_ang_pair list is: \n {} \n".format(cnt_ang_pair))
                print("The bunched up list is: \n {} \n".format(bunch))
            # dominant line direction, converted back to radians
            f_ori = m.radians(bunch[0][1])
            # candidate orientations: the detected direction shifted by +-90
            # and 180 degrees (a line fixes yaw only up to these symmetries)
            f_ori1 = wrap2Pi(f_ori + m.radians(90) - ini_angle_offset)
            f_ori2 = wrap2Pi(f_ori + m.radians(-90) - ini_angle_offset)
            f_ori3 = wrap2Pi(f_ori + m.radians(180) - ini_angle_offset)
            # pick whichever of f_ori, f_ori1, f_ori2, f_ori3 is closest
            # (in wrapped angular distance) to the previous estimate
            if(abs(wrap2Pi(best_ori_estimate - f_ori)) < abs(wrap2Pi(best_ori_estimate - f_ori1)) and abs(wrap2Pi(best_ori_estimate - f_ori)) < abs(wrap2Pi(best_ori_estimate - f_ori2)) and abs(wrap2Pi(best_ori_estimate - f_ori)) < abs(wrap2Pi(best_ori_estimate - f_ori3))):
                best_ori_estimate_temp = f_ori
            elif(abs(wrap2Pi(best_ori_estimate - f_ori1)) < abs(wrap2Pi(best_ori_estimate - f_ori)) and abs(wrap2Pi(best_ori_estimate - f_ori1)) < abs(wrap2Pi(best_ori_estimate - f_ori2)) and abs(wrap2Pi(best_ori_estimate - f_ori1)) < abs(wrap2Pi(best_ori_estimate - f_ori3))):
                best_ori_estimate_temp = f_ori1
            elif(abs(wrap2Pi(best_ori_estimate - f_ori2)) < abs(wrap2Pi(best_ori_estimate - f_ori)) and abs(wrap2Pi(best_ori_estimate - f_ori2)) < abs(wrap2Pi(best_ori_estimate - f_ori1)) and abs(wrap2Pi(best_ori_estimate - f_ori2)) < abs(wrap2Pi(best_ori_estimate - f_ori3))):
                best_ori_estimate_temp = f_ori2
            else:
                best_ori_estimate_temp = f_ori3
            # commit the chosen candidate (radians)
            best_ori_estimate = best_ori_estimate_temp
            if DEBUG:
                print("best ori estimate: {} deg".format(
                    m.degrees(best_ori_estimate)))
            # to debug lets plot the best_ori_estimate in the image
            pt1 = [200, 200]
            pt2 = [200, 200]
            line_angle = best_ori_estimate
            pt2[0] = int(pt2[0] + 200*m.cos(line_angle))
            pt2[1] = int(pt2[1] + 200*m.sin(line_angle))
            cv2.line(cv_image, (pt1[0], pt1[1]),
                     (pt2[0], pt2[1]), (0, 0, 255), 3)
        # publish abs odometry for yaw (also re-published with the previous
        # estimate when no lines were found in this frame)
        # create euler angles
        roll = 0
        pitch = 0
        # NOTE(review): yaw is negated here — presumably to flip from the
        # image frame to the odom frame; confirm against the consumer.
        yaw = -best_ori_estimate
        # convert to quaternion
        q = quaternion_from_euler(roll, pitch, yaw)
        # create a odom message
        odom_msg = Odometry()
        odom_msg.pose.pose.orientation.x = q[0]
        odom_msg.pose.pose.orientation.y = q[1]
        odom_msg.pose.pose.orientation.z = q[2]
        odom_msg.pose.pose.orientation.w = q[3]
        odom_msg.header.frame_id = "odom"
        odom_msg.header.stamp = rospy.Time().now()
        odom_pub.publish(odom_msg)
        # publish the annotated image for visualization
        rosimg = CvBridge().cv2_to_imgmsg(cv_image, "bgr8")
        image_pub.publish(rosimg)
if __name__ == "__main__":
    try:
        # feed every rectified upward-camera frame to the orientation callback
        abs_ori_sub = rospy.Subscriber(
            "/stereo/left_upward/image_rect", Image, abs_ori_cb)
        # hand control to ROS; all work happens inside abs_ori_cb
        rospy.spin()
    except rospy.ROSInterruptException:
        # normal shutdown (Ctrl-C / node kill)
        pass
|
[
"kartikprakash3775@gmail.com"
] |
kartikprakash3775@gmail.com
|
b1dde0477b45dffe82a9f680f72b5dc5f910eee9
|
3eb4d64a8bb0bc240a2ef189724f4d51b5275eac
|
/heltour/tournament/migrations/0106_auto_20161031_0546.py
|
059d9943ff0cb31240b7a8a561df84ba822d9f3b
|
[
"MIT"
] |
permissive
|
brucemubayiwa/heltour
|
c01cc88be7f86dce8246f619d7aa2da37e0e0ac2
|
fa4e9b06343acaf6a8a99337860e1ad433e68f6b
|
refs/heads/master
| 2021-01-23T19:59:04.099215
| 2017-09-06T03:34:31
| 2017-09-06T03:34:31
| 102,840,526
| 1
| 0
| null | 2017-09-08T08:53:30
| 2017-09-08T08:53:30
| null |
UTF-8
|
Python
| false
| false
| 2,839
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-31 05:46
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import select2.fields
class Migration(migrations.Migration):
    """Repoint every player foreign key at tournament.Player with an
    explicit on_delete=CASCADE (select2 FK fields)."""

    dependencies = [
        ('tournament', '0105_seasonplayer_final_rating'),
    ]

    # (model_name, field_name) pairs that all receive the identical
    # select2 ForeignKey to tournament.Player.
    _PLAYER_FKS = [
        ('alternateassignment', 'player'),
        ('availabletime', 'player'),
        ('gamenomination', 'nominating_player'),
        ('leaguemoderator', 'player'),
        ('playeravailability', 'player'),
        ('playerbye', 'player'),
        ('playerlateregistration', 'player'),
        ('playerwithdrawl', 'player'),
        ('seasonplayer', 'player'),
        ('seasonprizewinner', 'player'),
        ('teammember', 'player'),
    ]

    # The outermost iterable of a comprehension is evaluated in class scope,
    # so referencing _PLAYER_FKS here is legal.
    operations = [
        migrations.AlterField(
            model_name=model_name,
            name=field_name,
            field=select2.fields.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                to='tournament.Player'),
        )
        for model_name, field_name in _PLAYER_FKS
    ]
|
[
"ben.cyanfish@gmail.com"
] |
ben.cyanfish@gmail.com
|
3ec6bfaea601759fd9ce090e2468cd49049e454d
|
88cfeb8f7076450e7a38d31ab2d11883c1818c8d
|
/net/dpn92.py
|
bee4297159590c50e4ca40b1570569426a17eb3b
|
[] |
no_license
|
ZQPei/Alibaba_Cloud_German_AI_Challenge_for_Earth_Observation
|
4e5a127c12e0c02ed1914ab000a131e1a7f7d844
|
c2efb32763af0a56a3a7ecb9d83c0744f71d5c14
|
refs/heads/master
| 2020-04-26T04:31:57.731178
| 2019-02-17T01:10:55
| 2019-02-17T01:10:55
| 173,305,034
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,563
|
py
|
'''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """Dual-path bottleneck block.

    Runs a 1x1 reduce -> 3x3 grouped conv -> 1x1 expand pipeline whose output
    has ``out_planes + dense_depth`` channels. The first ``out_planes``
    channels are added to the shortcut (residual path); the remaining
    ``dense_depth`` channels are concatenated (dense path).
    """

    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth
        self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # 32-group 3x3 conv (ResNeXt-style cardinality); in_planes must be
        # divisible by 32.
        self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3,
                               stride=stride, padding=1, groups=32, bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes, out_planes + dense_depth,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes + dense_depth)
        # Projection shortcut only on the first block of a stage; otherwise
        # the identity passes through unchanged.
        if first_layer:
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes, out_planes + dense_depth,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes + dense_depth),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        skip = self.shortcut(x)
        split = self.out_planes
        # Residual part is summed; dense parts of both tensors are concatenated.
        merged = torch.cat([skip[:, :split] + y[:, :split],
                            skip[:, split:],
                            y[:, split:]], 1)
        return F.relu(merged)
class DPN(nn.Module):
    """Dual Path Network backbone.

    cfg keys: 'in_planes', 'out_planes', 'num_blocks', 'dense_depth' — one
    4-tuple each, giving per-stage bottleneck width, residual width, block
    count, and dense-path growth.

    conv1 takes 10-channel input and the head emits 17 logits —
    NOTE(review): presumably the multispectral bands / class count of the
    Earth-observation dataset; confirm against the data pipeline.
    """
    def __init__(self, cfg):
        super(DPN, self).__init__()
        in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
        num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']
        self.conv1 = nn.Conv2d(10, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Running channel count fed to the next Bottleneck; mutated by
        # _make_layer as the dense path grows, so stage order matters.
        self.last_planes = 64
        self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
        self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
        self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
        self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
        # Final width = residual width + dense channels from every block
        # plus the stage input's dense slice (hence num_blocks+1).
        self.linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], 17)
    def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
        # One stage: num_blocks Bottlenecks; only the first downsamples and
        # projects the shortcut (i == 0).
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for i,stride in enumerate(strides):
            layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0))
            # Each block widens the dense path by dense_depth channels.
            self.last_planes = out_planes + (i+2) * dense_depth
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 4x4 average pool then flatten; assumes 32x32 spatial input so the
        # feature map is 4x4 here — TODO confirm with the caller.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DPN26():
    """Build the shallow DPN variant: 2 blocks in each of the 4 stages."""
    return DPN({
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (2, 2, 2, 2),
        'dense_depth': (16, 32, 24, 128),
    })
def DPN92():
    """Build the DPN-92 variant: (3, 4, 20, 3) blocks per stage."""
    return DPN({
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (3, 4, 20, 3),
        'dense_depth': (16, 32, 24, 128),
    })
def test():
    """Smoke-test DPN92 on one dummy batch.

    Bug fix: DPN.conv1 is Conv2d(10, 64, ...), so the dummy input must have
    10 channels — the previous 3-channel tensor made this crash at conv1.
    """
    net = DPN92()
    x = torch.randn(1, 10, 32, 32)
    y = net(x)
    print(y)

# test()
|
[
"dfzspzq@163.com"
] |
dfzspzq@163.com
|
9262d9b3881e896a97b190c2ea16eeea43d24d9c
|
958c19436632b41b43c9462337d13e836935a9da
|
/E01_python_for_data_analysis/04_NumPy/0403_numpy_cal.py
|
24af0cf6e7c79bf551a52bc51df3c822da19b676
|
[] |
no_license
|
Vincent105/ML
|
4752b2a99c124e01e40e383a0177fb5d82115cb6
|
fa926caabf83628b3fb7d74cee02a3e923a917f7
|
refs/heads/master
| 2020-12-29T18:21:50.144711
| 2020-10-12T09:56:41
| 2020-10-12T09:56:41
| 238,697,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
import numpy as np
arr = np.array([[1., 2., 3.], [4., 5., 6.]])
print(arr)
# Element-wise arithmetic between equal-sized arrays operates per element.
print(arr * arr)
print(arr * arr - arr)
# Arithmetic between a scalar and an array broadcasts the scalar to every element.
print(1 / arr)
print(arr * 0.5)
# Comparing two equal-sized arrays yields a boolean array.
arr2 = np.array([[0., 4., 1.], [7., 2., 12.]])
print(arr2)
print(arr2 > arr)
|
[
"vincent1050917@gmail.com"
] |
vincent1050917@gmail.com
|
b352068896dbae835d20da90ab54de2d4f34fec9
|
d2eb7bd335175edd844a3e6c1c633ee0dc2dbb25
|
/contests_atcoder/arc017/arc017_c.py
|
80b806e3389e7dfd81e012229a4a9723cc08f1d5
|
[
"BSD-2-Clause"
] |
permissive
|
stdiorion/competitive-programming
|
5020a12b85f1e691ceb0cacd021606a9dc58b72c
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
refs/heads/main
| 2023-03-27T01:13:42.691586
| 2021-03-08T08:05:53
| 2021-03-08T08:05:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
from bisect import bisect_left, bisect_right

# Meet-in-the-middle: count the subsets of the n weights summing exactly
# to x. Split into two halves of <=16 items, enumerate 2^16 subset sums of
# each, then for every sum in the first half binary-search its complement
# in the sorted second half.
n, x = map(int, input().split())
w = [int(input()) for _ in range(n)]
pt1 = w[:16]
pt2 = w[16:]
# All subset sums of the first half.
w1 = []
for bit in range(1 << len(pt1)):
    weight = 0
    for i in range(len(pt1)):
        if (bit >> i) & 1:
            weight += pt1[i]
    w1.append(weight)
# With n <= 16 the second half is empty: count matches directly.
if not pt2:
    print(w1.count(x))
    exit()
# All subset sums of the second half.
w2 = []
for bit in range(1 << len(pt2)):
    weight = 0
    for i in range(len(pt2)):
        if (bit >> i) & 1:
            weight += pt2[i]
    w2.append(weight)
ans = 0
# Only w2 needs sorting (for bisect); iteration order over w1 is irrelevant.
# (Removed a dead `i2 = 0` and an unnecessary sort of w1.)
w2.sort()
for weight1 in w1:
    # Number of occurrences of the exact complement x - weight1 in w2.
    ans += bisect_right(w2, x - weight1) - bisect_left(w2, x - weight1)
print(ans)
|
[
"itkn1900@gmail.com"
] |
itkn1900@gmail.com
|
23241518e94ae0d5c41c03ff56152a117f302c17
|
d7ec67a5ba315103fa6a6bae6dc045f1fecf7add
|
/docs_master_tensorflow/keras/tf_dqn_simple_master/dqn_agent.py
|
d0dc2cccfa0c1fbf14d21175a9b41c3605ff96e2
|
[] |
no_license
|
munezou/PycharmProject
|
cc62f5e4278ced387233a50647e8197e009cc7b4
|
26126c02cfa0dc4c0db726f2f2cabb162511a5b5
|
refs/heads/master
| 2023-03-07T23:44:29.106624
| 2023-01-23T16:16:08
| 2023-01-23T16:16:08
| 218,804,126
| 2
| 1
| null | 2023-02-28T23:58:22
| 2019-10-31T15:57:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,247
|
py
|
from collections import deque
import os
import numpy as np
import tensorflow as tf
class DQNAgent:
    """
    Multi Layer Perceptron with Experience Replay.

    Q-network over an 8x8 state grid (flattened to 64 inputs) with one
    64-unit ReLU hidden layer, trained with RMSProp on minibatches sampled
    from a bounded replay memory. Uses the TensorFlow 1.x graph/session API.
    """
    def __init__(self, enable_actions, environment_name):
        # parameters
        self.name = os.path.splitext(os.path.basename(__file__))[0]
        self.environment_name = environment_name
        self.enable_actions = enable_actions
        self.n_actions = len(self.enable_actions)
        self.minibatch_size = 32
        self.replay_memory_size = 1000
        self.learning_rate = 0.001
        self.discount_factor = 0.9
        # NOTE(review): `exploration` is never read inside this class;
        # callers presumably pass their own epsilon to select_action.
        self.exploration = 0.1
        self.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models")
        self.model_name = "{}.ckpt".format(self.environment_name)
        # replay memory (oldest transitions dropped once maxlen is reached)
        self.D = deque(maxlen=self.replay_memory_size)
        # model
        self.init_model()
        # variables
        self.current_loss = 0.0
    def init_model(self):
        """Build the TF1 graph: 8x8 input -> 64 ReLU units -> Q-values."""
        # input layer (8 x 8)
        self.x = tf.placeholder(tf.float32, [None, 8, 8])
        # flatten (64)
        x_flat = tf.reshape(self.x, [-1, 64])
        # fully connected layer (64 units)
        W_fc1 = tf.Variable(tf.truncated_normal([64, 64], stddev=0.01))
        b_fc1 = tf.Variable(tf.zeros([64]))
        h_fc1 = tf.nn.relu(tf.matmul(x_flat, W_fc1) + b_fc1)
        # output layer (n_actions)
        W_out = tf.Variable(tf.truncated_normal([64, self.n_actions], stddev=0.01))
        b_out = tf.Variable(tf.zeros([self.n_actions]))
        self.y = tf.matmul(h_fc1, W_out) + b_out
        # loss function: mean squared error against the target Q-values y_
        self.y_ = tf.placeholder(tf.float32, [None, self.n_actions])
        self.loss = tf.reduce_mean(tf.square(self.y_ - self.y))
        # train operation
        optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
        self.training = optimizer.minimize(self.loss)
        # saver
        self.saver = tf.train.Saver()
        # session
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
    def Q_values(self, state):
        """Return Q(state, action) for every action as a 1-D array."""
        # Q(state, action) of all actions
        return self.sess.run(self.y, feed_dict={self.x: [state]})[0]
    def select_action(self, state, epsilon):
        """Epsilon-greedy: random action with prob. epsilon, else argmax Q."""
        if np.random.rand() <= epsilon:
            # random
            return np.random.choice(self.enable_actions)
        else:
            # max_action Q(state, action)
            return self.enable_actions[np.argmax(self.Q_values(state))]
    def store_experience(self, state, action, reward, state_1, terminal):
        """Append one (s, a, r, s', done) transition to the replay memory."""
        self.D.append((state, action, reward, state_1, terminal))
    def experience_replay(self):
        """Sample a random minibatch from memory and run one training step."""
        state_minibatch = []
        y_minibatch = []
        # sample random minibatch (with replacement: indexes may repeat)
        minibatch_size = min(len(self.D), self.minibatch_size)
        minibatch_indexes = np.random.randint(0, len(self.D), minibatch_size)
        for j in minibatch_indexes:
            state_j, action_j, reward_j, state_j_1, terminal = self.D[j]
            action_j_index = self.enable_actions.index(action_j)
            # start from the current predictions; only the taken action's
            # target is replaced, so the other actions contribute zero loss
            y_j = self.Q_values(state_j)
            if terminal:
                y_j[action_j_index] = reward_j
            else:
                # reward_j + gamma * max_action' Q(state', action')
                y_j[action_j_index] = reward_j + self.discount_factor * np.max(self.Q_values(state_j_1))  # NOQA
            state_minibatch.append(state_j)
            y_minibatch.append(y_j)
        # training
        self.sess.run(self.training, feed_dict={self.x: state_minibatch, self.y_: y_minibatch})
        # for log (loss recomputed after the update step)
        self.current_loss = self.sess.run(self.loss, feed_dict={self.x: state_minibatch, self.y_: y_minibatch})
    def load_model(self, model_path=None):
        """Restore weights from model_path, or the latest checkpoint in model_dir."""
        if model_path:
            # load from model_path
            self.saver.restore(self.sess, model_path)
        else:
            # load from checkpoint (silently does nothing if none exists)
            checkpoint = tf.train.get_checkpoint_state(self.model_dir)
            if checkpoint and checkpoint.model_checkpoint_path:
                self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
    def save_model(self):
        """Save the current session's weights to model_dir/model_name."""
        self.saver.save(self.sess, os.path.join(self.model_dir, self.model_name))
|
[
"kazumikm0119@pi5.fiberbit.net"
] |
kazumikm0119@pi5.fiberbit.net
|
42bcc717daa52c76b623b77adb64ac1e50d8fe60
|
b57d337ddbe946c113b2228a0c167db787fd69a1
|
/scr/py00033SpiderDeath.py
|
6fd5b9134c2358a8544c5ef441100d8e4da50196
|
[] |
no_license
|
aademchenko/ToEE
|
ebf6432a75538ae95803b61c6624e65b5cdc53a1
|
dcfd5d2de48b9d9031021d9e04819b309d71c59e
|
refs/heads/master
| 2020-04-06T13:56:27.443772
| 2018-11-14T09:35:57
| 2018-11-14T09:35:57
| 157,520,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
from toee import *
from utilities import *
from combat_standard_routines import *
def san_dying( attachee, triggerer ):
    """Dying hook: adjust CR, bump the kill counter on map 5069, and destroy
    the creature's ring (proto 3000) when the party alignment matches the
    per-map set below."""
    if should_modify_CR( attachee ):
        modify_CR( attachee, get_av_level() )
    # Party alignments (keyed by map) for which the ring is destroyed.
    ring_breakers = {
        5069: (LAWFUL_NEUTRAL, CHAOTIC_NEUTRAL, TRUE_NEUTRAL,
               LAWFUL_EVIL, CHAOTIC_EVIL, NEUTRAL_EVIL),
        5002: (LAWFUL_GOOD, CHAOTIC_GOOD, NEUTRAL_GOOD,
               LAWFUL_EVIL, CHAOTIC_EVIL, NEUTRAL_EVIL),
        5003: (LAWFUL_GOOD, CHAOTIC_GOOD, NEUTRAL_GOOD,
               LAWFUL_NEUTRAL, CHAOTIC_NEUTRAL, TRUE_NEUTRAL),
    }
    if attachee.map == 5069:
        # kill counter, only tracked on this map
        game.global_vars[3] = game.global_vars[3] + 1
    alignments = ring_breakers.get(attachee.map)
    if alignments is not None and game.party_alignment in alignments:
        ring = attachee.item_find( 3000 )
        ring.destroy()
    return RUN_DEFAULT
|
[
"demchenko.recruitment@gmail.com"
] |
demchenko.recruitment@gmail.com
|
fa428df271c1a095589ea4dda94bbd27ca4f7705
|
06870667821f26b0c8c96b52321938df58fd91f6
|
/parking_scrapers/scrapers/new_haven.py
|
85e9236cddfb6c481a2d0bfc60ccfb3c43b84610
|
[] |
no_license
|
jmcarp/open-parking-spaces
|
69244962a316fe6bd3273ba6837bfe8d0f1f4b8e
|
5f855a1b25c9109f15af26e1fb3b4ecbd3ef5845
|
refs/heads/master
| 2023-01-24T11:43:53.641262
| 2020-11-30T19:00:46
| 2020-11-30T19:00:46
| 312,906,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
import re
from typing import Iterator
import lxml.html
import requests
from base import LotSpaces, Scraper
class NewHavenScraper(Scraper):
    """Scrape New Haven html.

    https://parknewhaven.com
    """

    HTML_URL = "https://parknewhaven.com"
    TIMEOUT = 5
    # e.g. "Temple Street: 42% (512 available)"
    SPACES_PATTERN = re.compile(r"(.*?):\s+(\d+)% \((\d+) available\)", re.IGNORECASE)

    name = "new_haven"

    def fetch_spaces(self) -> Iterator[LotSpaces]:
        """Yield one LotSpaces per ticker link on the homepage."""
        resp = requests.get(
            self.HTML_URL,
            headers={"User-Agent": "open-parking-spaces"},
            timeout=self.TIMEOUT,
        )
        resp.raise_for_status()
        tree = lxml.html.fromstring(resp.content)
        xpath = '//div[contains(@class, "tickr")]//a[contains(@class, "tickrlink")]'
        for anchor in tree.xpath(xpath):
            parsed = self.SPACES_PATTERN.search(anchor.text_content())
            assert parsed is not None
            lot_name, _percent, free = parsed.groups()
            yield LotSpaces(
                lot=lot_name,
                spaces=int(free),
                url=anchor.attrib["href"],
            )
|
[
"jm.carp@gmail.com"
] |
jm.carp@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.