| blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 3–288) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–115) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses, 684 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k – 681M, ⌀) | star_events_count (int64, 0 – 209k) | fork_events_count (int64, 0 – 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀) | gha_language (stringclasses, 147 values) | src_encoding (stringclasses, 25 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 128 – 12.7k) | extension (stringclasses, 142 values) | content (stringlengths 128 – 8.19k) | authors (listlengths 1–1) | author_id (stringlengths 1–132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
efce919b3664c741e5e5cdd3efddf076345b6093
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/02-pyth/PyPattyrn-master/pypattyrn/structural/decorator.py
|
947743818a1dceeb4c5e662540bafd0242dc5234
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 2,788
|
py
|
from functools import partial
from abc import ABCMeta, abstractmethod
class Decorator(object, metaclass=ABCMeta):
"""
Base Decorator class that all decorator classes inherit from.
- External Usage Documentation: U{https://github.com/tylerlaberge/PyPattyrn#decorator-pattern}
- External Decorator Pattern documentation: U{https://en.wikipedia.org/wiki/Decorator_pattern}
"""
def __get__(self, instance, owner):
"""
Override __get__ in order to get the instance of a bound method call.
"""
return partial(self.__call__, instance)
@abstractmethod
def __call__(self, *args, **kwargs):
"""
All decorators must implement a __call__ method.
"""
pass
class DecoratorSimple(Decorator, metaclass=ABCMeta):
"""
A Base Decorator class for decorators with no arguments.
- External Usage Documentation: U{https://github.com/tylerlaberge/PyPattyrn#decorator-pattern}
- External Decorator Pattern documentation: U{https://en.wikipedia.org/wiki/Decorator_pattern}
"""
def __init__(self, func):
"""
Initialize a new DecoratorSimple instance.
@param func: The function being decorated.
"""
self.func = func
class DecoratorComplex(Decorator, metaclass=ABCMeta):
"""
A Base Decorator class for decorators with arguments.
- External Usage Documentation: U{https://github.com/tylerlaberge/PyPattyrn#decorator-pattern}
- External Decorator Pattern documentation: U{https://en.wikipedia.org/wiki/Decorator_pattern}
"""
@abstractmethod
def __init__(self, *args, **kwargs):
"""
Initialize a new DecoratorComplex instance.
@param args: Args for the decorator.
@param kwargs: Keyword args for the decorator.
"""
pass
@abstractmethod
def __call__(self, func, *args, **kwargs):
"""
Concrete DecoratorComplex instances must override the __call__ method.
@param func: The function being decorated.
@param args: Arguments for the decorated function.
@param kwargs: Keyword arguments for the decorated function.
@return:
"""
pass
class CallWrapper(DecoratorSimple):
"""
A Decorator for wrapping DecoratorComplex __call__ methods.
- External Usage Documentation: U{https://github.com/tylerlaberge/PyPattyrn#decorator-pattern}
- External Decorator Pattern documentation: U{https://en.wikipedia.org/wiki/Decorator_pattern}
"""
def __call__(self, instance, func):
"""
Wrap a concrete DecoratorComplex __call__ method.
"""
def wrapped(*args, **kwargs):
return self.func(instance, func, *args, **kwargs)
return wrapped
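# --- Editor's usage sketch (not part of the original file); the names Logged,
# --- Tagged, add, and greet are invented for illustration of the classes above.
class Logged(DecoratorSimple):
    """Log each call, then delegate to the wrapped function."""
    def __call__(self, *args, **kwargs):
        print('calling', self.func.__name__)
        return self.func(*args, **kwargs)
class Tagged(DecoratorComplex):
    """Prefix the wrapped function's result with a tag."""
    def __init__(self, tag):
        self.tag = tag
    @CallWrapper
    def __call__(self, func, *args, **kwargs):
        return '{0}: {1}'.format(self.tag, func(*args, **kwargs))
@Logged
def add(a, b):
    return a + b
@Tagged('result')
def greet(name):
    return 'hello ' + name
# add(2, 3)      -> prints "calling add", returns 5
# greet('world') -> returns 'result: hello world'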
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
ef0eff35233191513b76fde38c78e1f3e0f13ea5
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Attack/Attack.spec
|
c15c4364b14dd93124576beac54254b27f0aea6d
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 975
|
spec
|
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['Attack.py'],
pathex=['D:\\Users\\dhhyey\\Code'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='Attack',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='Attack')
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
9099b68c27e3fbffcb996837b243735fb81db3c4
|
cc5eb8eb50d64ffbca780c42a908053ec549f295
|
/Python Course CSe11309x/quiz3_prog3.py
|
2b9ed7f99272eb84762ebb8b2b6185efa3fb98e4
|
[] |
no_license
|
bemagee/LearnPython
|
328b1f7a9d5046fe1503aece8a5134a7dd2727d2
|
a42565f8fb45f9e2ebbcdcf359ebb9092bf837c2
|
refs/heads/master
| 2020-12-13T02:45:30.308604
| 2016-10-24T03:09:12
| 2016-10-24T03:09:12
| 10,793,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
#!/usr/bin/env python3
# Write a program that asks the user for a positive number 'n' as input. Assume that the user
# enters a number greater than or equal to 3 and print a triangle as described below.
# For example if the user enters 6 then the output should be:
#*
#**
#***
#****
#*****
#******
#*****
#****
#***
#**
#*
my_int = int(input("enter a positive int: "))
for going_up in range(1, my_int) :  # start at 1: range(0, ...) would print a blank first line
print(going_up * "*")
while my_int > 0 :
print(my_int * "*")
my_int -= 1
|
[
"bemagee@gmail.com"
] |
bemagee@gmail.com
|
d58ebf4ac0db230cddebff1dad95bb3e7734694c
|
6ae1ba7d2ad2a97725699e1467171cc2deebb0d4
|
/文件、异常/02-文件的路径.py
|
3e1488362ed58e9b33d2e32db13dabf3db3ec841
|
[] |
no_license
|
weizt/python_studying
|
6a95edcf781bd91d8b5f2684f1c6046a40a6d432
|
6dace3a6b73032de220c4b3ea4b3c37d47880ff1
|
refs/heads/master
| 2023-01-18T19:45:48.291898
| 2020-12-02T00:59:01
| 2020-12-02T00:59:01
| 293,965,283
| 2
| 0
| null | 2020-09-13T01:15:50
| 2020-09-09T01:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
# Paths: absolute paths and relative paths
# Absolute path: starts from the drive letter; the full, spelled-out path
# Relative path: starts from the current directory; the short form (recommended)
# Tip: right-click the file name and choose "Show in Explorer" to open the file's location directly
import os
print(os.sep)  # on Windows, folders are separated by \ ;
# but in Python, \ starts a string escape sequence, so the two conflict
# Three ways to resolve the \ conflict:
# 1. Write two backslashes \\ to stand for one \
# 2. Prefix the string with r
# 3. Use / as on non-Windows systems (this is the recommended one)
# file = open('D:\\桌面备份文件\\207操作手册.txt', encoding='gbk')
# file = open(r'D:\桌面备份文件\207操作手册.txt', encoding='gbk')
file = open('D:/桌面备份文件/207操作手册.txt', encoding='gbk')
print(file.read())
# Relative path: starts from the folder containing the current file; it can be opened directly
file2 = open('01-文件的打开和关闭.py', encoding='utf8')
print(file2.read())
# ../ means go up to the parent folder
# ./ means the current folder and may be omitted
file3 = open('./../requirements.txt', encoding='utf8')
print(file3.read())
file.close()
file2.close()
file3.close()
|
[
"weizhetao3@163.com"
] |
weizhetao3@163.com
|
0d03883d7784067097556669a9d66f4dde920729
|
de01cb554c2292b0fbb79b4d5413a2f6414ea472
|
/algorithms/Medium/360.sort-transformed-array.py
|
0f69da25ba1935966ee90c3a22fa9f0d71097b1f
|
[] |
no_license
|
h4hany/yeet-the-leet
|
98292017eadd3dde98a079aafcd7648aa98701b4
|
563d779467ef5a7cc85cbe954eeaf3c1f5463313
|
refs/heads/master
| 2022-12-10T08:35:39.830260
| 2020-09-02T23:12:15
| 2020-09-02T23:12:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
#
# @lc app=leetcode id=360 lang=python3
#
# [360] Sort Transformed Array
#
# https://leetcode.com/problems/sort-transformed-array/description/
#
# algorithms
# Medium (48.92%)
# Total Accepted: 35.5K
# Total Submissions: 72.6K
# Testcase Example: '[-4,-2,2,4]\n1\n3\n5'
#
# Given a sorted array of integers nums and integer values a, b and c. Apply a
# quadratic function of the form f(x) = ax^2 + bx + c to each element x in the
# array.
#
# The returned array must be in sorted order.
#
# Expected time complexity: O(n)
#
#
# Example 1:
#
#
# Input: nums = [-4,-2,2,4], a = 1, b = 3, c = 5
# Output: [3,9,15,33]
#
#
#
# Example 2:
#
#
# Input: nums = [-4,-2,2,4], a = -1, b = 3, c = 5
# Output: [-23,-5,1,7]
#
#
#
#
from typing import List  # missing import: List is used in the annotation below
class Solution:
def sortTransformedArray(self, nums: List[int], a: int, b: int, c: int) -> List[int]:
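        # --- Editor's sketch: the original file ends at the signature above; the
        # body below is an added illustration of the standard O(n) two-pointer fill.
        # A parabola's extreme values sit at the ends of the sorted input.
        def f(x):
            return a * x * x + b * x + c
        n = len(nums)
        result = [0] * n
        left, right = 0, n - 1
        index = n - 1 if a >= 0 else 0  # a >= 0: largest at ends; a < 0: smallest at ends
        while left <= right:
            l_val, r_val = f(nums[left]), f(nums[right])
            if a >= 0:
                if l_val >= r_val:
                    result[index], left = l_val, left + 1
                else:
                    result[index], right = r_val, right - 1
                index -= 1
            else:
                if l_val <= r_val:
                    result[index], left = l_val, left + 1
                else:
                    result[index], right = r_val, right - 1
                index += 1
        return result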
|
[
"kevin.wkmiao@gmail.com"
] |
kevin.wkmiao@gmail.com
|
df255a93aa6826829f42b9c50283a11d91b9f72d
|
25d2afe5d12fe58a97da7b51e23fdc55929e38f5
|
/anonymise_dataset_folder.py
|
18e10508f9b9043df95dc7985d4acfe30a7b2071
|
[] |
no_license
|
apmoore1/tdsa_comparisons
|
071396efe0c5e0bad297119d2ce48bf0c1cbb42f
|
ba613afece15239e6a38f277c455a035739f0b2d
|
refs/heads/master
| 2021-06-23T16:00:49.803589
| 2021-05-25T09:32:53
| 2021-05-25T09:32:53
| 225,565,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
import argparse
from pathlib import Path
from target_extraction.data_types import TargetTextCollection
def parse_path(path_string: str) -> Path:
path_string = Path(path_string).resolve()
return path_string
if __name__ == '__main__':
save_dir_help = ('File Path to directory where the anonymised results '
'will be saved.')
results_dir_help = ('File path to the directory that currently stores all '
'results')
parser = argparse.ArgumentParser()
parser.add_argument("results_dir", type=parse_path, help=results_dir_help)
parser.add_argument("save_dir", type=parse_path, help=save_dir_help)
args = parser.parse_args()
save_dir = args.save_dir
results_dir = args.results_dir
save_dir.mkdir(parents=True, exist_ok=True)
dataset_names = ['election', 'laptop', 'restaurant']
split_names = ['train', 'val', 'test']
for dataset_name in dataset_names:
dataset_result_folder = Path(results_dir, f'{dataset_name}_dataset')
save_dataset_folder = Path(save_dir, f'{dataset_name}_dataset')
save_dataset_folder.mkdir(parents=True, exist_ok=True)
for split_name in split_names:
split_fp = Path(dataset_result_folder, f'{split_name}.json')
split_dataset = TargetTextCollection.load_json(split_fp)
split_dataset: TargetTextCollection
split_dataset.anonymised = True
save_fp = Path(save_dataset_folder, f'{split_name}.json')
split_dataset.to_json_file(save_fp, include_metadata=True)
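# Editor's note (not part of the original file): run from the shell as, e.g.
#   python anonymise_dataset_folder.py <results_dir> <save_dir>
# which re-saves each {dataset}_dataset/{split}.json copy with anonymised = True.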
|
[
"andrew.p.moore94@gmail.com"
] |
andrew.p.moore94@gmail.com
|
b223c25deb92c2584942abe587611364fd6452fb
|
47c39800fa6f928e0d13f26727ba52bda2aa6ff0
|
/venv/Lib/site-packages/aliyunsdkrds/request/v20140815/DescribeDBInstanceNetInfoRequest.py
|
aaaad8b6f364419423cb88b61323a126dae4413a
|
[
"MIT"
] |
permissive
|
dddluke/zhihuipingtai
|
952ed5f9a4011cb4fb2765a0571c978af784d708
|
4e46e01440f8c270c05259ac0f38bd56dd04016c
|
refs/heads/master
| 2023-03-09T03:32:47.807760
| 2021-02-26T02:36:10
| 2021-02-26T02:36:10
| 341,816,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,765
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class DescribeDBInstanceNetInfoRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DescribeDBInstanceNetInfo','rds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Flag(self):
return self.get_query_params().get('Flag')
def set_Flag(self,Flag):
self.add_query_param('Flag',Flag)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_DBInstanceNetRWSplitType(self):
return self.get_query_params().get('DBInstanceNetRWSplitType')
def set_DBInstanceNetRWSplitType(self,DBInstanceNetRWSplitType):
self.add_query_param('DBInstanceNetRWSplitType',DBInstanceNetRWSplitType)
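# Editor's note (not part of the original file): typical use with the core SDK
# client; the credentials, region, and instance id below are placeholders.
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DescribeDBInstanceNetInfoRequest()
#   request.set_DBInstanceId('<db-instance-id>')
#   response = client.do_action_with_exception(request)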
|
[
"lukeli0306@gmail.com"
] |
lukeli0306@gmail.com
|
a0ed35e6fc27549ff2800fb20c22fb6efdff37bf
|
ce3499f5d09396e72151f4d742a5562ebdb127c3
|
/godap.py
|
367a6d0ef7f1af07ba0250eebd821485442e3696
|
[] |
no_license
|
jscouet/godap
|
08fb0e020b95e00e92f3a94df3761cc965dda52d
|
e0dda750aba60ad59dfb7183e9beead62712dbe4
|
refs/heads/master
| 2021-01-01T04:06:41.399768
| 2016-05-24T15:35:37
| 2016-05-24T15:35:37
| 59,554,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,244
|
py
|
#!/usr/bin/python3
"""
script that pushes users into LDAP
"""
VERSION = 0.2
import logging
logging.basicConfig(filename='client_application.log', level=logging.DEBUG)
from ldap3 import Server, Connection, SUBTREE
from ldap3.utils.log import set_library_log_detail_level, OFF, BASIC, NETWORK, EXTENDED, PROTOCOL
import argparse
import mods.ldap
set_library_log_detail_level(BASIC)
server='ldap-130a'
dn_connect='cn=admin,dc=edelia,dc=net'
dn_pass='M0j@ve3'
con=''
users={}
#pp.get_users()
#exit()
LDAP_USER_BRANCH='ou=people,dc=edelia,dc=net'
LDAP_GROUP_BRANCH='ou=groups,dc=edelia,dc=net'
LDAP_PTF_GROUP_BRANCH='ou=ptf,ou=groups,dc=edelia,dc=net'
GROUPS = { "dev" : "4000" ,
"dep" : "900" ,
"recette" : "2000",
"int" : "2500"
}
def read_config_file() :
config_file = 'godap.ini'
def usage() :
print ("godap -u user -G primary_group -g grp1,grp2 -m home_directory")
def connexion():
global con
print ("methode de connexion")
con=Connection(server, user=dn_connect , password=dn_pass)
if not con.bind():
print("error de connexion a {}".format(server) )
def get_user():
global con
groups_ids = []
print ("methode de get des users")
con.search(search_base='dc=edelia,dc=net',
search_filter = '(objectClass=person)',
search_scope = SUBTREE ,
attributes= ['*'] )
#for entry in con.response:
# print(entry['dn'], entry['attributes'])
#for entry in con.response:
#print(entry['dn'], entry['attributes'])
users = con.response
#print ( att )
#print (users)
return users
for i in users :
#print ( i )
print ("sn est : {}".format( i ) )
#for j in i.keys() :
#print ("j: {}".format(j) )
dn = i["dn"]
print ( dn )
def get_group_dn(group) :
global con
filter = ""
filter = '(cn=' + group +')'
print(filter)
con.search(search_base='ou=groups,dc=edelia,dc=net',
search_filter = filter,
search_scope = SUBTREE )
#attributes= ['*'] )
list_of_groups_dn = con.response
print (con.response[0]["dn"])
#def send_ldap_user_group():
#def send_ldap_request() :
if __name__ == "__main__" :
"""
argument parsing
"""
user_name=""
primary_group=""
groups=[]
parser = argparse.ArgumentParser(description='command line for adding a user in LDAP')
parser.add_argument('-u', action='store', default="none" , dest='user',help='name of the user to add')
parser.add_argument('-n', action='store', default="none" , dest='name',help='name of the user to add')
parser.add_argument('-G', action='store', default="none", dest='primary_group',help='name of the primary group of the user')
parser.add_argument('-g', action='append', default=[], dest='groups',help='group to add to the user')
#parser.add_argument('count', action='store')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
results = parser.parse_args()
print ("options : user : {} {}, group primary : {} , groups : {}".format(results.user , results.name , results.primary_group , results.groups ))
#connexion()
ls = mods.ldap.ldap(server, dn_connect, dn_pass)
#
get_group_dn("ptf24")
exit()
send_ldap_user_creation( results.user , results.primary_group , results.groups , results.name )
exit()
users = get_user()
get_last_user_uid_from_group(users,"dev")
|
[
"gogs@fake.local"
] |
gogs@fake.local
|
5f5c2d5fd25e62e92717b420b164f974576182c3
|
3539d0e3ddd7849a14876e95f0332428ec28ebf7
|
/Data Scientist Career Path/3. Python Fundamentals/6. Python Loop/2. List Comprehension Code Challenge/18. ages.py
|
e5fdc328f0eaafe3e447ed0b4cf91a74ee9f7ef5
|
[
"MIT"
] |
permissive
|
DincerDogan/Data-Science-Learning-Path
|
ff146de2cf4ebc5fedfa9377babf959208dfe7e6
|
2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb
|
refs/heads/main
| 2023-05-08T10:53:47.449974
| 2021-06-06T21:27:31
| 2021-06-06T21:27:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
names = ["Shilah", "Arya", "Kele"]
ages = [14, 9, 35]
users = ["Name: " + x[0] + ", Age: " + str(x[1]) for x in zip(names, ages)]
|
[
"aristyanto2320@gmail.com"
] |
aristyanto2320@gmail.com
|
1347a718ac0fe7a1d32610c9e403b982b3bd4506
|
c5f58af61e3577ded52acda210f4f664651b598c
|
/template/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py
|
9f445458c8365c10abb325f59a583923ce416a73
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hojihun5516/object-detection-level2-cv-02
|
0a4ee5cea9a77ef5d43fb61a4b37fe3a87cb0eac
|
bc8a08286935b31b8e7e597c4b1ca2cbbaeb9109
|
refs/heads/master
| 2023-08-31T09:50:59.150971
| 2021-10-16T15:00:19
| 2021-10-16T15:00:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
_base_ = "./vfnet_r50_fpn_mstrain_2x_coco.py"
model = dict(
backbone=dict(
type="ResNeXt",
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
init_cfg=dict(type="Pretrained", checkpoint="open-mmlab://resnext101_32x4d"),
)
)
|
[
"hojihun5516@daum.net"
] |
hojihun5516@daum.net
|
3a68029f539876708281d23051be8c94c328cd85
|
3171b42757e60a61c2a71474cb07f81b9219ee50
|
/tests/test_upload.py
|
ebf1b3cf99f8570e73ec3366ba494fdd4c1da8d1
|
[] |
no_license
|
yoophi/flaskygram
|
cde431d1a177e5bf54558ab83bb07886b93bc9ed
|
363e29f393ab3e1b8d40102add09b1f9bff65670
|
refs/heads/main
| 2023-05-14T15:11:05.307578
| 2021-06-05T06:55:53
| 2021-06-05T06:55:53
| 57,872,879
| 0
| 0
| null | 2023-05-02T20:26:02
| 2016-05-02T07:40:40
|
Python
|
UTF-8
|
Python
| false
| false
| 695
|
py
|
"""
TestCase migration with Flask-Testing
"""
from flask import url_for
from io import BytesIO  # Python 3 replacement for the Python 2-only cStringIO.StringIO
from flaskygram.models import Media
from tests import BaseTestCase
class MediaTest(BaseTestCase):
def test_upload(self):
res = self.client.post(url_for('api.media_upload'),
data=dict(
file=(BytesIO(b"123456789 " * 1000), 'test.png'),
),
headers={
'Authorization': 'Bearer %s' % self.get_oauth2_token()
})
self.assert200(res)
self.assertEqual(1, Media.query.count())
|
[
"yoophi@gmail.com"
] |
yoophi@gmail.com
|
1bdad74646ded2dbe5d41d72449d186dbcc5986a
|
fa9bae32c203323dfb345d9a415d4eaecb27a931
|
/492. Construct the Rectangle.py
|
223ca17420f3b701265528b09aa68d0b6bc71233
|
[] |
no_license
|
IUIUN/The-Best-Time-Is-Now
|
48a0c2e9d449aa2f4b6e565868a227b6d555bf29
|
fab660f98bd36715d1ee613c4de5c7fd2b69369e
|
refs/heads/master
| 2020-09-14T12:06:24.074973
| 2020-02-15T06:55:08
| 2020-02-15T06:55:08
| 223,123,743
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
import math  # missing import: math.sqrt is used below
from typing import List  # missing import: List is used in the annotation below
class Solution:
def constructRectangle(self, area: int) -> List[int]:
mid = int(math.sqrt(area))
while mid > 0:
if area % mid == 0:
return [int(area // mid), int(mid)]
mid -= 1
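# Editor's note (not part of the original file): scanning down from
# int(sqrt(area)) finds the most square-like factor pair with width >= height, e.g.
#   Solution().constructRectangle(4)  -> [2, 2]
#   Solution().constructRectangle(37) -> [37, 1]   (prime areas degenerate to [area, 1])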
|
[
"liuyijun0621@hotmail.com"
] |
liuyijun0621@hotmail.com
|
3a335ae6cc7bab264be2c947c01ec8df7a6e941e
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codewars/8kyu/age-range-compatibility-equation/Python/solution1.py
|
3657a02d2a92ac4c93199e60e1489bc9fdc04b1a
|
[
"MIT"
] |
permissive
|
RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081
| 2019-07-05T09:42:40
| 2019-07-05T09:42:40
| 88,911,035
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
# Python - 3.4.3
from math import floor
def dating_range(age):
minage = lambda a: (a / 2) + 7 if a > 14 else 0.9 * a
maxage = lambda a: (a - 7) * 2 if a > 14 else 1.1 * a
return '%d-%d' % (floor(minage(age)), floor(maxage(age)))
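# Editor's note (not part of the original file): the lambdas encode the
# "half your age plus seven" rule, with a plain 10% band at ages 14 and under:
#   dating_range(27) -> '20-40'
#   dating_range(10) -> '9-11'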
|
[
"d79523@hotmail.com"
] |
d79523@hotmail.com
|
d55ce02404f32b0877e1a295d5b671b302e5db52
|
0466559817d3a1be9409da2c83db99c4db3bacfe
|
/hubcheck/pageobjects/widgets/members_profile_mailpreference.py
|
cdbdc54f4a0ca509c4b8a5bcbc0be220497cb01e
|
[
"MIT"
] |
permissive
|
ken2190/hubcheck
|
955cf9b75a1ee77e28256dfd3a780cfbc17de961
|
2ff506eb56ba00f035300862f8848e4168452a17
|
refs/heads/master
| 2023-03-20T15:17:12.949715
| 2015-09-29T16:11:18
| 2015-09-29T16:11:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,363
|
py
|
from hubcheck.pageobjects.basepageelement import Radio
from hubcheck.pageobjects.basepageelement import Select
from hubcheck.pageobjects.widgets.members_profile_element import MembersProfileElement
class MembersProfileMailPreference1(MembersProfileElement):
def __init__(self, owner, locatordict={}):
super(MembersProfileMailPreference1,self).__init__(owner,locatordict)
# load hub's classes
MembersProfileMailPreference_Locators = self.load_class('MembersProfileMailPreference_Locators')
# update this object's locator
self.locators.update(MembersProfileMailPreference_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.mailpreference = Radio(self,{'Yes':'mail_yes','No':'mail_no'})
self.access = Select(self,{'base':'access'})
# update the component's locators with this objects overrides
self._updateLocators()
def value(self):
"""return a dictionary of the mailpreference and access values"""
return {'mailpreference' : self.mailpreference.value(),
'access' : self.access.value()}
def update(self,mailpreference=None,access=None):
"""update the mailpreference and access values"""
if mailpreference != None:
self.mailpreference.value = mailpreference
if access != None:
self.access.value = access
self.save.click()
class MembersProfileMailPreference1_Locators_Base(object):
"""locators for MembersProfileMailPreference2 object"""
locators = {
'base' : "css=.profile-optin",
'mail_yes' : "css=#mailPreferenceOptionYes",
'mail_no' : "css=#mailPreferenceOptionNo",
'access' : "css=.profile-optin select[name='access[optin]']",
'sectionkey' : "css=.profile-optin .key",
'sectionvalue' : "css=.profile-optin .value",
'open' : "css=.profile-optin .edit-profile-section",
'close' : "css=.profile-optin .edit-profile-section",
'save' : "css=.profile-optin .section-edit-submit",
'cancel' : "css=.profile-optin .section-edit-cancel",
}
class MembersProfileMailPreference2(MembersProfileElement):
def __init__(self, owner, locatordict={}):
super(MembersProfileMailPreference2,self).__init__(owner,locatordict)
# load hub's classes
MembersProfileMailPreference_Locators = self.load_class('MembersProfileMailPreference_Locators')
# update this object's locator
self.locators.update(MembersProfileMailPreference_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.mailpreference = Select(self,{'base':'mailpref'})
self.access = Select(self,{'base':'access'})
# update the component's locators with this objects overrides
self._updateLocators()
def value(self):
"""return a dictionary of the mailpreference and access values"""
return {'mailpreference' : self.mailpreference.value(),
'access' : self.access.value()}
def update(self,mailpreference=None,access=None):
"""update the mailpreference and access values"""
if mailpreference != None:
self.mailpreference.value = mailpreference
if access != None:
self.access.value = access
self.save.click()
class MembersProfileMailPreference2_Locators_Base(object):
"""locators for MembersProfileMailPreference2 object"""
locators = {
'base' : "css=.profile-optin",
'mailpref' : "css=.profile-optin select[name='mailPreferenceOption']",
'access' : "css=.profile-optin select[name='access[optin]']",
'sectionkey' : "css=.profile-optin .key",
'sectionvalue' : "css=.profile-optin .value",
'open' : "css=.profile-optin .edit-profile-section",
'close' : "css=.profile-optin .edit-profile-section",
'save' : "css=.profile-optin .section-edit-submit",
'cancel' : "css=.profile-optin .section-edit-cancel",
}
|
[
"telldsk@gmail.com"
] |
telldsk@gmail.com
|
a9a1d3f8224ac0897447a887e426b0391bef5786
|
788f1d32045560ffafff468476c9c9897dabb31c
|
/Curso em Vídeo/Mundo 1 Fundamentos/Desafios/desafio019.py
|
9a32db158cf6b88cdf869f23df5dd89ebfe57efa
|
[
"MIT"
] |
permissive
|
henriqueumeda/-Python-study
|
7f8d911e9e724aa2f183e652e6a7ae31b742b90e
|
28e93a377afa4732037a29eb74d4bc7c9e24b62f
|
refs/heads/main
| 2023-08-10T21:10:32.360808
| 2021-09-21T02:37:16
| 2021-09-21T02:37:16
| 330,294,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
from random import choice
primeiroAluno = input('Primeiro aluno: ')
segundoAluno = input('Segundo aluno: ')
terceiroAluno = input('Terceiro aluno: ')
quartoAluno = input('Quarto aluno: ')
choiceList = [primeiroAluno, segundoAluno, terceiroAluno, quartoAluno]
print('O aluno escolhido foi {}'.format(choice(choiceList)))
|
[
"issamuumeda@gmail.com"
] |
issamuumeda@gmail.com
|
de4d019cf164022898f2466c7ffb7d80085142ed
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/models/Paddle2ONNX/ocr_v2/main_test.py
|
5e669c2b5e142253e0524cf4ddc5edf09bee4f17
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 4,519
|
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
main test
"""
import os
import shutil
import platform
class OcrV2Test(object):
"""
test Ocr to onnx tipc
"""
def __init__(self):
if os.path.exists("tipc_models_url_PaddleOCR_latest.txt"):
os.remove("tipc_models_url_PaddleOCR_latest.txt")
self.txt_url = (
"https://paddle-qa.bj.bcebos.com/fullchain_ce_test/"
"model_download_link/tipc_models_url_PaddleOCR_latest.txt"
)
os.system("wget -q --no-proxy {}".format(self.txt_url))
self.model_url_list = []
for line in open("tipc_models_url_PaddleOCR_latest.txt"):
self.model_url_list.append(line)
self.opset_v_list = [11, 12]
self.ignore_model = [
"ch_PP-OCRv3_rec_PACT",
"rec_mtb_nrtr",
"rec_mv3_tps_bilstm_att_v2.0",
"rec_mv3_tps_bilstm_ctc_v2.0",
"rec_r34_vd_tps_bilstm_att_v2.0",
"rec_r34_vd_tps_bilstm_ctc_v2.0",
]
def prepare_resource(self, tgz_url):
"""
prepare resource and pytest code
"""
tgz = tgz_url[tgz_url.rfind("/") + 1 : -1]
time_stamp = tgz[0 : tgz.find("^")]
tmp = tgz.replace(time_stamp + "^", "")
repo = tmp[0 : tmp.find("^")]
tmp = tgz.replace(time_stamp + "^" + repo + "^", "")
model_name = tmp[0 : tmp.find("^")]
model_path = model_name + "_upload"
tmp = tgz.replace(time_stamp + "^" + repo + "^" + model_name + "^", "")
paddle_commit = tmp[0 : tmp.find("^")]
tmp = tgz.replace(time_stamp + "^" + repo + "^" + model_name + "^" + paddle_commit + "^", "")
repo_commit = tmp[0 : tmp.find(".")]
str_all = ""
for opset_v in self.opset_v_list:
tmp = (
"def test_opt_v{}():\n"
' """test {} opt version {}"""\n'
" logging.info('time stamp: {} !!!')\n"
" logging.info('model name: {} !!!')\n"
" logging.info('paddle commit: {} !!!')\n"
" logging.info('repo commit: {} !!!')\n"
" unit_exit_code = os.system(\n"
' "paddle2onnx --model_dir={} "\n'
' "--model_filename=inference.pdmodel "\n'
' "--params_filename=inference.pdiparams "\n'
' "--save_file={} "\n'
' "--opset_version={} --enable_onnx_checker=True"\n'
" )\n"
" assert unit_exit_code == 0\n"
"\n"
"\n".format(
opset_v,
model_name,
opset_v,
time_stamp,
model_name,
paddle_commit,
repo_commit,
model_path,
os.path.join(model_path, "inference.onnx"),
opset_v,
)
)
str_all += tmp
case_name = model_name.replace(".", "_")
case_name = case_name.replace("-", "_")
with open("test_{}.py".format(case_name), "w") as f:
f.write(
"#!/bin/env python\n"
"# -*- coding: utf-8 -*-\n"
"# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python\n"
'"""\n'
"test {} to onnx\n"
'"""\n'
"import os\n"
"import logging\n"
"\n"
"\n".format(model_name)
)
f.write(str_all)
os.system("wget -q --no-proxy {}".format(tgz_url))
os.system("tar -xzf {}".format(tgz))
return tgz, case_name, model_path, model_name
def run(self):
"""
run test
"""
for tgz_url in self.model_url_list:
tgz, case_name, model_path, model_name = self.prepare_resource(tgz_url)
if model_name not in self.ignore_model:
if platform.system() == "Windows":
os.system("python.exe -m pytest {} --alluredir=report".format("test_" + case_name + ".py"))
else:
os.system("python -m pytest {} --alluredir=report".format("test_" + case_name + ".py"))
os.remove(tgz)
shutil.rmtree(model_path)
if __name__ == "__main__":
test = OcrV2Test()
test.run()
|
[
"825276847@qq.com"
] |
825276847@qq.com
|
ed2aff3a66672c8a682fc5200fdd435cdc11b20a
|
7386480f7e09101b0518cf1f92b5902726c8816e
|
/RTMinuit/minuit_html.py
|
46e3db8756e812009af255ede952105be3242ba4
|
[] |
no_license
|
mattbellis/RTMinuit
|
51458bc9fd5a83895eba959c44895826e070741b
|
d67c35b9aaf904bed278d4ce237cc2e6cf9485dc
|
refs/heads/master
| 2020-12-25T03:00:43.769380
| 2012-05-21T19:09:56
| 2012-05-21T19:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,872
|
py
|
class MinuitHTMLResult:
def __init__(self,m):
"""
:param m:
:type m Minuit:
"""
self.varnames = m.varname
self.values = m.values
self.errors = m.errors
self.mnerrors = m.minos_errors()
def _repr_html_(self):
tmp = []
header = u"""<tr>
<td></td>
<td>Parameter</td>
<td>Value</td>
<td>Parab Error</td>
<td>Minos Error-</td>
<td>Minos Error+</td>
</tr>"""
keys = sorted(self.values)
for i,k in enumerate(self.varnames):
val = self.values[k]
err = self.errors[k]
mnp = self.mnerrors[k].eplus
mnm = self.mnerrors[k].eminus
varno = i+1
line = u"""<tr>
<td align="right">{varno:d}</td>
<td align="left">{k}</td>
<td align="right">{val:e}</td>
<td align="left"> ±{err:e}</td>
<td align="left">{mnm:e}</td>
<td align="left">+{mnp:e}</td>
</tr>""".format(**locals())
tmp.append(line)
ret = '<table>%s\n%s\n</table>'%(header,'\n'.join(tmp))
return ret
class MinuitCorrelationMatrixHTML:
def __init__(self,m):
self.matrix = m.error_matrix(True)
self.params = m.list_of_vary_param()
self.nparams = len(self.params)
assert(self.matrix.shape==(self.nparams,self.nparams))
def style(self,val):
return 'background-color:%s'%Gradient.rgb_color_for(val)
def _repr_html_(self):
header = ''
for i in range(self.nparams):
header+='<td style="text-align:left"><div style="-webkit-writing-mode:vertical-rl;">%s</div></td>\n'%self.params[i]
header = '<tr><td></td>\n%s</tr>\n'%header
lines = list()
for i in range(self.nparams):
line = '<td>%s</td>'%self.params[i]
for j in range(self.nparams):
style = self.style(self.matrix[i,j])
line+='<td style="%s">%4.2f</td>\n'%(style,self.matrix[i,j])
line = '<tr>\n%s</tr>\n'%line
lines.append(line)
ret = '<table>\n%s%s</table>\n'%(header,''.join(lines))
return ret
class Gradient:
#A3FEBA pastel green
#FF7575 pastel red
#from http://code.activestate.com/recipes/266466-html-colors-tofrom-rgb-tuples/
@classmethod
def color_for(cls,v,min=0.,max=1.,startcolor=(163,254,186),stopcolor=(255,117,117)):
c = [0]*3
for i,sc in enumerate(startcolor):
c[i] = round(startcolor[i] + 1.0*(v-min)*(stopcolor[i]-startcolor[i])/(max-min))
return tuple(c)
@classmethod
def rgb_color_for(cls,v):
c = cls.color_for(abs(v))
return 'rgb(%d,%d,%d)'%c
|
[
"piti118@gmail.com"
] |
piti118@gmail.com
|
f1c9b9076961020c81cbaf3646daba66e73e364d
|
8f6cc0e8bd15067f1d9161a4b178383e62377bc7
|
/Video_future_frame_prediction/fc_lstm_n202_ubu/old_code/old_code_v0002/models/lstmcell_simple_encoder_decoder.py
|
95a2bc4e593826921cf1006deb6946f36735942c
|
[] |
no_license
|
humorbeing/python_github
|
9c4dfc61a3cefbb266fefff335f6b28d05797e5e
|
e4b4b49bee7e7e3843c6874717779ce8d619bd02
|
refs/heads/master
| 2023-01-22T21:51:20.193131
| 2020-01-26T21:47:23
| 2020-01-26T21:47:23
| 163,707,778
| 0
| 0
| null | 2022-12-27T15:37:48
| 2019-01-01T01:58:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
# encoder decoder, only 1 layer, flatten style
# lstmcell
# v0004
import torch
import torch.nn as nn
import numpy as np
class FC_LSTM(nn.Module):
def __init__(self):
super(FC_LSTM, self).__init__()
self.encoder_lstmcell = nn.LSTMCell(4096, 4096)
self.decoder_lstmcell = nn.LSTMCell(4096, 4096)
def forward(self, x, future_step=10):
# x in is [seq=10, batch, 64, 64]
device = next(self.parameters()).device
seq_size = x.shape[0]
batch_size = x.shape[1]
h_e = torch.zeros((batch_size, 4096)).to(device)
c_e = torch.zeros((batch_size, 4096)).to(device)
x = x.reshape((seq_size, batch_size, -1))
# print(x.shape)
for seq in range(seq_size):
h_e, c_e = self.encoder_lstmcell(x[seq], (h_e, c_e))
# print(h_e.shape)
h_d = h_e
c_d = torch.zeros((batch_size, 4096)).to(device)
zero_input = torch.zeros((batch_size, 4096)).to(device)
outputs = []
for seq in range(future_step):
h_d, c_d = self.decoder_lstmcell(zero_input, (h_d, c_d))
outputs.append(h_d)
outputs = torch.stack(outputs)
outputs = torch.reshape(outputs, (future_step, batch_size, 64, 64))  # the decoder emits future_step frames, not seq_size (equal only by coincidence here)
return outputs
if __name__ == "__main__":
model = FC_LSTM()
x = torch.randn((10, 100, 64, 64))
x = model(x)
print(x.shape)
|
[
"geemguang@gmail.com"
] |
geemguang@gmail.com
|
d7fff55e5019e9289ac54ee81bc8c3b1546d4e16
|
91a9f5a7afb398f4238527708cbc155dc972cbfa
|
/data_analysis/third_party_code/rrt.py
|
081eec2a0241365b13ab63425ad28e696f407a8b
|
[] |
no_license
|
bddmodelcar/kzpy3.2
|
cd6f9bf6b7b8b920c79b4ee36c2592b992ae4332
|
b044b26649b19b240bd580feca20424a237374b1
|
refs/heads/master
| 2021-01-19T21:01:58.687712
| 2017-08-23T22:39:56
| 2017-08-23T22:39:56
| 101,243,308
| 0
| 1
| null | 2017-08-24T02:04:50
| 2017-08-24T02:04:50
| null |
UTF-8
|
Python
| false
| false
| 1,651
|
py
|
#!/usr/bin/env python
# rrt.py
# This program generates a simple rapidly
# exploring random tree (RRT) in a rectangular region.
#
# Written by Steve LaValle
# May 2011
import sys, random, math, pygame
from pygame.locals import *
from math import sqrt, cos, sin, atan2
# constants
XDIM = 640
YDIM = 480
WINSIZE = [XDIM, YDIM]
EPSILON = 7.0
NUMNODES = 5000
def dist(p1, p2):
return sqrt((p1[0] - p2[0]) * (p1[0] - p2[0]) + (p1[1] - p2[1]) * (p1[1] - p2[1]))
def step_from_to(p1, p2):
if dist(p1, p2) < EPSILON:
return p2
else:
theta = atan2(p2[1] - p1[1], p2[0] - p1[0])
return p1[0] + EPSILON * cos(theta), p1[1] + EPSILON * sin(theta)
def main():
# initialize and prepare screen
pygame.init()
screen = pygame.display.set_mode(WINSIZE)
pygame.display.set_caption('RRT S. LaValle May 2011')
white = 255, 240, 200
black = 20, 20, 40
screen.fill(black)
nodes = []
nodes.append((XDIM / 2.0, YDIM / 2.0)) # Start in the center
# nodes.append((0.0,0.0)) # Start in the corner
for i in range(NUMNODES):
rand = random.random() * 640.0, random.random() * 480.0
nn = nodes[0]
for p in nodes:
if dist(p, rand) < dist(nn, rand):
nn = p
newnode = step_from_to(nn, rand)
nodes.append(newnode)
pygame.draw.line(screen, white, nn, newnode)
pygame.display.update()
# print i, " ", nodes
for e in pygame.event.get():
if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
sys.exit("Leaving because you requested it.")
# if python says run, then we should run
if __name__ == '__main__':
main()
|
[
"karlzipser@berkeley.edu"
] |
karlzipser@berkeley.edu
|
ec6f2aa9f46031fe2b4e7abef4c1302d1c1116dd
|
b4ea052a5c9d9602ac7a3d7c3d341ef13c0f7b64
|
/tuple.py
|
9beabaddaeeddf86af319b30c7863c3ff432841a
|
[] |
no_license
|
iehoshia/math
|
e74409d68ebd60d8deb1c4a41b0dc0dd96772b94
|
d9139fd7de15a10230fc3d76c0f57c5f66be66ef
|
refs/heads/master
| 2021-06-27T08:37:48.210371
| 2020-07-05T18:01:48
| 2020-07-05T18:01:48
| 221,271,881
| 0
| 0
| null | 2021-03-20T03:08:07
| 2019-11-12T17:16:35
|
Python
|
UTF-8
|
Python
| false
| false
| 137
|
py
|
msg = "('UserError', ('El campo de es obligatorio.', ''))"
print(type(msg))
ts = eval(msg)
print(type(ts))
print(ts[0])
print(ts[1][0])
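# Editor's note (not part of the original file): expected output, since eval()
# turns the string back into a nested tuple:
#   <class 'str'>
#   <class 'tuple'>
#   UserError
#   El campo de es obligatorio.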
|
[
"jepgez@gmail.com"
] |
jepgez@gmail.com
|
e950a96f99338a6581c70f7a239071ec9857cc9a
|
2876a5a8e7d50d97039b4e63c25f5eaf1cc20808
|
/src/odontology/person/migrations/0006_remove_patient_reciently_added.py
|
fa65de12e835d148fed6cd7e7a0620ba29ebeb5c
|
[
"Apache-2.0"
] |
permissive
|
nanomolina/JP
|
6fcd01b75d71aa560781d4c0350ff76025f85f92
|
248a47bced4dac850f85d28968ddf279cd123400
|
refs/heads/master
| 2022-11-29T09:31:43.449654
| 2019-07-16T18:25:20
| 2019-07-16T18:25:20
| 51,620,989
| 2
| 0
|
Apache-2.0
| 2022-11-22T01:11:25
| 2016-02-12T22:33:24
|
HTML
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-14 08:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('person', '0005_auto_20160214_0517'),
]
operations = [
migrations.RemoveField(
model_name='patient',
name='reciently_added',
),
]
|
[
"nanomolinacav@gmail.com"
] |
nanomolinacav@gmail.com
|
30f8c99090912bd768c74d849e02e4c932b3ae47
|
2c872fedcdc12c89742d10c2f1c821eed0470726
|
/pbase/day03/jiangyi/day03/code/str2.py
|
414e203cb3fc038b2e548595d370a0630461fc59
|
[] |
no_license
|
zuigehulu/AID1811
|
581c3c7a37df9fa928bc632e4891fc9bafe69201
|
10cab0869875290646a9e5d815ff159d0116990e
|
refs/heads/master
| 2020-04-19T16:33:04.174841
| 2019-01-30T07:58:24
| 2019-01-30T07:58:24
| 168,307,918
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
# str.py
print('ABC\n123')
print('ABC\t123')
print("ABCDE\rab")
print("ABCDE\b\babcd")
print("==ABCD==")
print("==\x41\x42\x43\x44==") # ABCD
print('\x68\x65\x6c\x6c\x6f')
print("hello")
|
[
"442315617@qq.com"
] |
442315617@qq.com
|
701edda3fc08ca5e204e91994d360e36880706aa
|
532a28255249530c98eea8985cdcfb093dbf28b1
|
/testing/test_boxed.py
|
bee936735f1ff0a109346bf20f8150639e74bb0f
|
[
"MIT"
] |
permissive
|
lukas-bednar/pytest-xdist-convert
|
8dc09ebae810344f8ebf031b3d04de4e9a62602c
|
255c0617159b611eaa94b80c7b61568c5c8ce082
|
refs/heads/master
| 2021-01-23T12:18:04.783539
| 2015-09-01T15:28:21
| 2015-09-01T15:28:21
| 41,746,483
| 1
| 0
| null | 2015-09-01T15:21:30
| 2015-09-01T15:21:30
| null |
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
import pytest
import os
needsfork = pytest.mark.skipif(not hasattr(os, "fork"),
reason="os.fork required")
@needsfork
def test_functional_boxed(testdir):
p1 = testdir.makepyfile("""
import os
def test_function():
os.kill(os.getpid(), 15)
""")
result = testdir.runpytest(p1, "--boxed")
result.stdout.fnmatch_lines([
"*CRASHED*",
"*1 failed*"
])
@needsfork
@pytest.mark.parametrize("capmode", [
"no",
pytest.mark.xfail("sys", reason="capture cleanup needed"),
pytest.mark.xfail("fd", reason="capture cleanup needed")])
def test_functional_boxed_capturing(testdir, capmode):
p1 = testdir.makepyfile("""
import os
import sys
def test_function():
sys.stdout.write("hello\\n")
sys.stderr.write("world\\n")
os.kill(os.getpid(), 15)
""")
result = testdir.runpytest(p1, "--boxed", "--capture=%s" % capmode)
result.stdout.fnmatch_lines("""
*CRASHED*
*stdout*
hello
*stderr*
world
*1 failed*
""")
class TestOptionEffects:
def test_boxed_option_default(self, testdir):
tmpdir = testdir.tmpdir.ensure("subdir", dir=1)
config = testdir.parseconfig()
assert not config.option.boxed
pytest.importorskip("execnet")
config = testdir.parseconfig('-d', tmpdir)
assert not config.option.boxed
def test_is_not_boxed_by_default(self, testdir):
config = testdir.parseconfig(testdir.tmpdir)
assert not config.option.boxed
|
[
"holger@merlinux.eu"
] |
holger@merlinux.eu
|
3b71545bbfa3cf0b81dc43c505f9b0ee6d8e556f
|
5ded398a05f59f08f2add076fa50e42bfcb5cc92
|
/home/migrations/0002_load_initial_data.py
|
3fb748150d45554121b7e71e3ea15175052fd015
|
[] |
no_license
|
crowdbotics-apps/vool-22192
|
1dc764992f07ce542f6691693503375007175a6d
|
d8c48223d5df02577fe997f6a50fdbd83eb96dce
|
refs/heads/master
| 2023-01-09T11:42:10.989994
| 2020-11-01T19:20:38
| 2020-11-01T19:20:38
| 309,170,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "vool"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">vool</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "vool-22192.botics.co"
site_params = {
"name": "vool",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
21aa8958edca79602fd8a9062a0ca2fb5d5a8527
|
c06d18ac5b87b3b82fc486454c422b119d6c1ee9
|
/src/demo/_tensorflow/notebooks/2_BasicModels/linear_regression.py
|
db1a4d9133a9a5e089a49002de6f171371fd87ae
|
[
"MIT"
] |
permissive
|
tangermi/nlp
|
b3a4c9612e6049463bf12bc9abb7aff06a084ace
|
aa36b8b20e8c91807be73a252ff7799789514302
|
refs/heads/master
| 2022-12-09T12:33:15.009413
| 2020-04-03T04:03:24
| 2020-04-03T04:03:24
| 252,056,010
| 0
| 0
| null | 2022-12-08T07:26:55
| 2020-04-01T02:55:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,211
|
py
|
# -*-coding:utf-8-*-
from __future__ import absolute_import, division, print_function
# %%
import tensorflow as tf
import numpy as np
rng = np.random
# %%
# Parameters.
learning_rate = 0.01  # learning rate
training_steps = 1000  # total number of training steps
display_step = 50
# %%
# Training Data.
X = np.array([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
Y = np.array([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = X.shape[0]
# %%
# Weight and Bias, initialized randomly.
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Linear regression (Wx + b).
def linear_regression(x):
return W * x + b
# Mean square error.
def mean_square(y_pred, y_true):
return tf.reduce_sum(tf.pow(y_pred - y_true, 2)) / (2 * n_samples)
# Stochastic Gradient Descent Optimizer.
optimizer = tf.optimizers.SGD(learning_rate)
# %%
# Optimization process.
def run_optimization():
# Wrap computation inside a GradientTape for automatic differentiation.
with tf.GradientTape() as g:
pred = linear_regression(X)
loss = mean_square(pred, Y)
# Compute gradients.
    gradients = g.gradient(loss, [W, b])  # compute the gradients of the loss function; the loss decreases fastest along the (negative) gradient direction
# Update W and b following gradients.
    optimizer.apply_gradients(zip(gradients, [W, b]))  # the learning rate sets the step size and the gradient sets the direction, moving the parameters [W, b] toward the fastest decrease of the loss
# %%
# Run training for the given number of steps.
for step in range(1, training_steps + 1):
# Run the optimization to update W and b values.
run_optimization()
if step % display_step == 0:
pred = linear_regression(X)
loss = mean_square(pred, Y)
print("step: %i, loss: %f, W: %f, b: %f" % (step, loss, W.numpy(), b.numpy()))
# %%
import matplotlib.pyplot as plt
# %%
# Graphic display
plt.plot(X, Y, 'ro', label='Original data')
plt.plot(X, np.array(W * X + b), label='Fitted line')
plt.legend()
plt.show()
|
[
"n10057862@qut.edu.au"
] |
n10057862@qut.edu.au
|
0584bb10c38dba33bffbd8942a3bc1da49985993
|
d903801965f5a203360a989c5e5330160bb8f509
|
/pragmatics_2/settings/base.py
|
2de70505263bd2f12c487534bdb5eabadcb1482f
|
[] |
no_license
|
matt700395/server_test
|
fbc63fe4a9aea29610089b3ec87dcf5834047d27
|
c21618bdf4d7f38889410d3204c15e4c61c15a54
|
refs/heads/master
| 2023-07-25T02:12:24.187475
| 2021-08-11T01:51:30
| 2021-08-11T01:51:30
| 387,237,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,084
|
py
|
"""
Django settings for pragmatics_2 project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
"gkgk"
from pathlib import Path
import os, environ
from django.urls import reverse_lazy
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap4',
'accountapp',
'profileapp',
'articleapp',
'commentapp',
'projectapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pragmatics_2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pragmatics_2.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS=[
BASE_DIR / "static",
]
LOGIN_REDIRECT_URL = reverse_lazy('accountapp:hello_world')
LOGOUT_REDIRECT_URL = reverse_lazy('accountapp:login')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"matt7003@korea.ac.kr"
] |
matt7003@korea.ac.kr
|
0fa6083c10d10d0fd237b63dd89a046cd5d7ecf6
|
61efd764ae4586b6b2ee5e6e2c255079e2b01cfc
|
/azure-graphrbac/azure/graphrbac/models/graph_error.py
|
0bfa9744e63d8b4251f72750eb08d46a828b3f80
|
[
"MIT"
] |
permissive
|
AutorestCI/azure-sdk-for-python
|
a3642f53b5bf79d1dbb77851ec56f4cc0c5b3b61
|
60b0726619ce9d7baca41f6cd38f741d74c4e54a
|
refs/heads/master
| 2021-01-21T02:23:59.207091
| 2018-01-31T21:31:27
| 2018-01-31T21:31:27
| 55,251,306
| 4
| 3
| null | 2017-11-13T17:57:46
| 2016-04-01T17:48:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class GraphError(Model):
"""Active Directory error information.
:param code: Error code.
:type code: str
:param message: Error message value.
:type message: str
"""
_attribute_map = {
'code': {'key': 'odata\\.error.code', 'type': 'str'},
'message': {'key': 'odata\\.error.message.value', 'type': 'str'},
}
def __init__(self, code=None, message=None):
self.code = code
self.message = message
class GraphErrorException(HttpOperationError):
"""Server responsed with exception of type: 'GraphError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(GraphErrorException, self).__init__(deserialize, response, 'GraphError', *args)
|
[
"autorestci@microsoft.com"
] |
autorestci@microsoft.com
|
797f75d711c35e8f51c4b63311f9c718f07a6b4c
|
91b6b36c7eba4ef0f97eea76a32c297760e24034
|
/games/migrations/0001_initial.py
|
a999330944ff2ace8f32685de6c1ac1b44d755f4
|
[] |
no_license
|
joescaos/Tienda-video-juegos
|
ddfbba3affdb4302077d205d1a6b408cc08cf670
|
8b05f15d655398f7efc77af126fe022fec6d3261
|
refs/heads/main
| 2023-02-11T05:43:33.950151
| 2021-01-07T03:06:01
| 2021-01-07T03:06:01
| 327,482,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
# Generated by Django 3.0.8 on 2020-07-04 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
('imagen', models.ImageField(null=True, upload_to='games/')),
('descripcion', models.TextField()),
('precio', models.PositiveIntegerField()),
('categoria', models.CharField(choices=[('Juegos de accion', 'Juegos de accion'), ('Juegos de simulacion', 'Juegos de simulacion'), ('Juegos de deportes', 'Juegos de deportes'), ('Juegos de aventura', 'Juegos de aventura'), ('Juegos de plataformas', 'Juegos de plataformas'), ('Juegos de puzzle', 'Juegos de puzzle')], max_length=80, null=True)),
('existencia', models.PositiveIntegerField(null=True)),
],
),
]
|
[
"jxexcxo@gmail.com"
] |
jxexcxo@gmail.com
|
0d04fa2c27807dd543aa96a8eb34f13b5aa285a3
|
48b8ef4cb13195bd48c3bd741df407f4df7a7db1
|
/py2vega/functions/date_time.py
|
52cb563af5f98569f0a6115a6e91a96ee9e73dd4
|
[
"BSD-3-Clause"
] |
permissive
|
QuantStack/py2vega
|
7c6d1e114e97f835ae2d3ef47950680d8a9b7e55
|
049f8a89adc4197a69a384160bbbb633c61abaf8
|
refs/heads/master
| 2021-06-22T22:38:32.885585
| 2021-03-03T09:46:31
| 2021-03-03T09:46:31
| 203,118,828
| 9
| 3
|
BSD-3-Clause
| 2021-03-03T08:32:02
| 2019-08-19T07:15:29
|
Python
|
UTF-8
|
Python
| false
| false
| 4,165
|
py
|
"""Module that implements mocking Vega date and time functions."""
date_time_functions = [
'now', 'datetime', 'date', 'day', 'year', 'quarter', 'month', 'hours',
'minutes', 'seconds', 'milliseconds', 'time', 'timezoneoffset', 'utc',
'utcdate', 'utcday', 'utcyear', 'utcquarter', 'utcmonth', 'utchours',
'utcminutes', 'utcseconds', 'utcmilliseconds'
]
error_message = ' is a mocking function that is not supposed to be called directly'
def now():
"""Return the timestamp for the current time."""
raise RuntimeError('now' + error_message)
def datetime(year, month, day, hour, min, sec, millisec):
"""Return a new Date instance. The month is 0-based, such that 1 represents February."""
raise RuntimeError('datetime' + error_message)
def date(datetime):
"""Return the day of the month for the given datetime value, in local time."""
raise RuntimeError('date' + error_message)
def day(datetime):
"""Return the day of the week for the given datetime value, in local time."""
raise RuntimeError('day' + error_message)
def year(datetime):
"""Return the year for the given datetime value, in local time."""
raise RuntimeError('year' + error_message)
def quarter(datetime):
"""Return the quarter of the year (0-3): for the given datetime value, in local time."""
raise RuntimeError('quarter' + error_message)
def month(datetime):
"""Return the (zero-based): month for the given datetime value, in local time."""
raise RuntimeError('month' + error_message)
def hours(datetime):
"""Return the hours component for the given datetime value, in local time."""
raise RuntimeError('hours' + error_message)
def minutes(datetime):
"""Return the minutes component for the given datetime value, in local time."""
raise RuntimeError('minutes' + error_message)
def seconds(datetime):
"""Return the seconds component for the given datetime value, in local time."""
raise RuntimeError('seconds' + error_message)
def milliseconds(datetime):
"""Return the milliseconds component for the given datetime value, in local time."""
raise RuntimeError('milliseconds' + error_message)
def time(datetime):
"""Return the epoch-based timestamp for the given datetime value."""
raise RuntimeError('time' + error_message)
def timezoneoffset(datetime):
"""Return the timezone offset from the local timezone to UTC for the given datetime value."""
raise RuntimeError('timezoneoffset' + error_message)
def utc(year, month, day, hour, min, sec, millisec):
"""Return a timestamp for the given UTC date. The month is 0-based, such that 1 represents February."""
raise RuntimeError('utc' + error_message)
def utcdate(datetime):
"""Return the day of the month for the given datetime value, in UTC time."""
raise RuntimeError('utcdate' + error_message)
def utcday(datetime):
"""Return the day of the week for the given datetime value, in UTC time."""
raise RuntimeError('utcday' + error_message)
def utcyear(datetime):
"""Return the year for the given datetime value, in UTC time."""
raise RuntimeError('utcyear' + error_message)
def utcquarter(datetime):
"""Return the quarter of the year (0-3): for the given datetime value, in UTC time."""
raise RuntimeError('utcquarter' + error_message)
def utcmonth(datetime):
"""Return the (zero-based): month for the given datetime value, in UTC time."""
raise RuntimeError('utcmonth' + error_message)
def utchours(datetime):
"""Return the hours component for the given datetime value, in UTC time."""
raise RuntimeError('utchours' + error_message)
def utcminutes(datetime):
"""Return the minutes component for the given datetime value, in UTC time."""
raise RuntimeError('utcminutes' + error_message)
def utcseconds(datetime):
"""Return the seconds component for the given datetime value, in UTC time."""
raise RuntimeError('utcseconds' + error_message)
def utcmilliseconds(datetime):
"""Return the milliseconds component for the given datetime value, in UTC time."""
raise RuntimeError('utcmilliseconds' + error_message)
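# Illustrative check (added; a minimal sketch, not part of the original
# module): every stub above raises when invoked directly, e.g.:
#   >>> now()
#   RuntimeError: now is a mocking function that is not supposed to be called directly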
|
[
"martin.renou@gmail.com"
] |
martin.renou@gmail.com
|
ff1b3b0dca727f260be29d791c3eb863c60bb44c
|
e40111dda0ad509d474adfe4c52ae9b5525f388e
|
/show_weather/migrations/0001_initial.py
|
257aa28f000c8c31d9e0659e17bdad9c05474fe7
|
[] |
no_license
|
XeyyamSherif/Weather-App
|
2fb997fcfb5a6885ffffbf05e6ebe2127fd2bccf
|
6de019cf289ff60d299b9f1e58c1f8c04fa3517f
|
refs/heads/master
| 2023-01-23T06:57:55.655632
| 2020-12-04T20:10:42
| 2020-12-04T20:10:42
| 318,623,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
# Generated by Django 3.0.3 on 2020-03-01 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='added_cities',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city_name', models.CharField(max_length=100)),
                ('added_time', models.DateField()),
            ],
        ),
    ]
|
[
"you@example.com"
] |
you@example.com
|
a67658f0ba4e3957c2b07e683cf18f9d800d4d49
|
962b7a864f6a85d4418292be2ad3f3c58ae89400
|
/docs/conf.py
|
1f83660b90ecd881682aa1743e2a7298a72d00e1
|
[
"MIT"
] |
permissive
|
ArtusU/one_buy
|
eb415697d4d314f1a23f255b83486f75fa1f6adb
|
5d74a691f78f162eb6b16d9c3a2049043c36b0b0
|
refs/heads/master
| 2023-08-20T20:31:41.931260
| 2021-09-11T11:51:00
| 2021-09-11T11:51:00
| 405,059,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,259
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
if os.getenv("READTHEDOCS", default=False) == "True":
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
os.environ["USE_DOCKER"] = "no"
else:
sys.path.insert(0, os.path.abspath("/app"))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "One Buy"
copyright = """2021, Artus U"""
author = "Artus U"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
|
[
"artusrock@hotmail.com"
] |
artusrock@hotmail.com
|
aa6d9a1822835aebdd5132c6ba6a72d4ff601275
|
c49590eb7f01df37c8ec5fef00d0ffc7250fa321
|
/openapi_client/models/market_details_quote.py
|
9be5f6cec718e266059c4b82e375d42004f2e4ce
|
[] |
no_license
|
harshad5498/ks-orderapi-python
|
373a4b85a56ff97e2367eebd076f67f972e92f51
|
237da6fc3297c02e85f0fff1a34857aaa4c1d295
|
refs/heads/master
| 2022-12-09T19:55:21.938764
| 2020-09-03T05:22:51
| 2020-09-03T05:22:51
| 293,533,651
| 0
| 0
| null | 2020-09-07T13:19:25
| 2020-09-07T13:19:24
| null |
UTF-8
|
Python
| false
| false
| 3,151
|
py
|
# coding: utf-8
"""
KS Trade API's
The version of the OpenAPI document: 1.0
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class MarketDetailsQuote(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'depth': 'list[Depth]'
}
attribute_map = {
'depth': 'depth'
}
def __init__(self, depth=None, local_vars_configuration=None): # noqa: E501
"""MarketDetailsQuote - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._depth = None
self.discriminator = None
if depth is not None:
self.depth = depth
@property
def depth(self):
"""Gets the depth of this MarketDetailsQuote. # noqa: E501
:return: The depth of this MarketDetailsQuote. # noqa: E501
:rtype: list[Depth]
"""
return self._depth
@depth.setter
def depth(self, depth):
"""Sets the depth of this MarketDetailsQuote.
:param depth: The depth of this MarketDetailsQuote. # noqa: E501
:type depth: list[Depth]
"""
self._depth = depth
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MarketDetailsQuote):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MarketDetailsQuote):
return True
return self.to_dict() != other.to_dict()
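# Illustrative usage sketch (added; assumes `Depth` instances from the same
# generated package, not shown here):
#   q = MarketDetailsQuote(depth=[])
#   q.to_dict()                        # -> {'depth': []}
#   q == MarketDetailsQuote(depth=[])  # -> True (equality compares to_dict())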
|
[
"thebhushanp@gmail.com"
] |
thebhushanp@gmail.com
|
a00eaff7c43f2734cc5e34167a1e3b2535928b9c
|
3b9082ed8c0717d40165f5cc520937c23e9c49c0
|
/lib/streamifiers/public_suffix.py
|
672978920b94dfef4ec6ab269c416a679a78dd1c
|
[
"BSD-3-Clause"
] |
permissive
|
CYBAI/compression-test
|
2915c0d929be4689ba0df3beb1c19d808aef1405
|
ea1306131e32f44f97b197550e7b3a5d7734ad0b
|
refs/heads/master
| 2021-03-20T07:55:10.844035
| 2019-01-19T05:11:25
| 2019-01-19T05:11:25
| 247,191,443
| 1
| 0
|
NOASSERTION
| 2020-03-14T01:31:04
| 2020-03-14T01:31:03
| null |
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
#!/usr/bin/env python
from collections import defaultdict
from . import BaseStreamifier, Stream
from publicsuffix import PublicSuffixList
class Streamifier(BaseStreamifier):
"""
Use the Public Suffix List <http://publicsuffix.org> to split the messages
into streams, one per direction per suffix.
"""
def __init__(self, procs):
BaseStreamifier.__init__(self, procs)
self.psl = PublicSuffixList()
def streamify(self, messages):
"""
Given a list of messages (each a req, res tuple), return a list of
Stream objects.
"""
reqs = defaultdict(list)
ress = defaultdict(list)
suffixes = []
for req, res in messages:
host = req[':host']
suffix = self.psl.get_public_suffix(host.split(":", 1)[0])
if suffix not in suffixes:
suffixes.append(suffix)
reqs[suffix].append((req, host))
ress[suffix].append((res, host))
streams = []
for suffix in suffixes:
streams.append(Stream(suffix, reqs[suffix], 'req', self.procs))
streams.append(Stream(suffix, ress[suffix], 'res', self.procs))
return streams
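# Illustrative behavior sketch (added): for messages addressed to
# a.example.com and b.example.net, streamify() returns four Streams -
# ('example.com', req), ('example.com', res), ('example.net', req),
# ('example.net', res) - i.e. one per direction per public suffix.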
|
[
"mnot@mnot.net"
] |
mnot@mnot.net
|
9cc14a0d8484b7a1274d7519a78757f8a1879fbc
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res_bw/scripts/common/lib/ctypes/test/test_numbers.py
|
d22b4be81fa25957e12f9a9ea14e9e9cab2b4b56
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788
| 2015-11-18T11:33:37
| 2015-11-18T11:33:37
| 46,414,438
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 6,859
|
py
|
# 2015.11.18 12:02:44 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/ctypes/test/test_numbers.py
from ctypes import *
import unittest
import struct
def valid_ranges(*types):
result = []
for t in types:
fmt = t._type_
size = struct.calcsize(fmt)
a = struct.unpack(fmt, ('\x00' * 32)[:size])[0]
b = struct.unpack(fmt, ('\xff' * 32)[:size])[0]
c = struct.unpack(fmt, ('\x7f' + '\x00' * 32)[:size])[0]
d = struct.unpack(fmt, ('\x80' + '\xff' * 32)[:size])[0]
result.append((min(a, b, c, d), max(a, b, c, d)))
return result
ArgType = type(byref(c_int(0)))
unsigned_types = [c_ubyte,
c_ushort,
c_uint,
c_ulong]
signed_types = [c_byte,
c_short,
c_int,
c_long,
c_longlong]
bool_types = []
float_types = [c_double, c_float]
try:
c_ulonglong
c_longlong
except NameError:
pass
else:
unsigned_types.append(c_ulonglong)
signed_types.append(c_longlong)
try:
c_bool
except NameError:
pass
else:
bool_types.append(c_bool)
unsigned_ranges = valid_ranges(*unsigned_types)
signed_ranges = valid_ranges(*signed_types)
bool_values = [True,
False,
0,
1,
-1,
5000,
'test',
[],
[1]]
class NumberTestCase(unittest.TestCase):
def test_default_init(self):
for t in signed_types + unsigned_types + float_types:
self.assertEqual(t().value, 0)
def test_unsigned_values(self):
for t, (l, h) in zip(unsigned_types, unsigned_ranges):
self.assertEqual(t(l).value, l)
self.assertEqual(t(h).value, h)
def test_signed_values(self):
for t, (l, h) in zip(signed_types, signed_ranges):
self.assertEqual(t(l).value, l)
self.assertEqual(t(h).value, h)
def test_bool_values(self):
from operator import truth
for t, v in zip(bool_types, bool_values):
self.assertEqual(t(v).value, truth(v))
def test_typeerror(self):
for t in signed_types + unsigned_types + float_types:
self.assertRaises(TypeError, t, '')
self.assertRaises(TypeError, t, None)
return
def test_from_param(self):
for t in signed_types + unsigned_types + float_types:
self.assertEqual(ArgType, type(t.from_param(0)))
def test_byref(self):
for t in signed_types + unsigned_types + float_types + bool_types:
parm = byref(t())
self.assertEqual(ArgType, type(parm))
def test_floats(self):
class FloatLike(object):
def __float__(self):
return 2.0
f = FloatLike()
for t in float_types:
self.assertEqual(t(2.0).value, 2.0)
self.assertEqual(t(2).value, 2.0)
self.assertEqual(t(2L).value, 2.0)
self.assertEqual(t(f).value, 2.0)
def test_integers(self):
class FloatLike(object):
def __float__(self):
return 2.0
f = FloatLike()
class IntLike(object):
def __int__(self):
return 2
i = IntLike()
for t in signed_types + unsigned_types:
self.assertRaises(TypeError, t, 3.14)
self.assertRaises(TypeError, t, f)
self.assertEqual(t(i).value, 2)
def test_sizes(self):
for t in signed_types + unsigned_types + float_types + bool_types:
try:
size = struct.calcsize(t._type_)
except struct.error:
continue
self.assertEqual(sizeof(t), size)
self.assertEqual(sizeof(t()), size)
def test_alignments(self):
for t in signed_types + unsigned_types + float_types:
code = t._type_
align = struct.calcsize('c%c' % code) - struct.calcsize(code)
self.assertEqual((code, alignment(t)), (code, align))
self.assertEqual((code, alignment(t())), (code, align))
def test_int_from_address(self):
from array import array
for t in signed_types + unsigned_types:
try:
array(t._type_)
except ValueError:
continue
a = array(t._type_, [100])
v = t.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
self.assertEqual(type(v), t)
a[0] = 42
self.assertEqual(v.value, a[0])
def test_float_from_address(self):
from array import array
for t in float_types:
a = array(t._type_, [3.14])
v = t.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
self.assertIs(type(v), t)
a[0] = 2.3456e+17
self.assertEqual(v.value, a[0])
self.assertIs(type(v), t)
def test_char_from_address(self):
from ctypes import c_char
from array import array
a = array('c', 'x')
v = c_char.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
self.assertIs(type(v), c_char)
a[0] = '?'
self.assertEqual(v.value, a[0])
def test_init(self):
self.assertRaises(TypeError, c_int, c_long(42))
def test_float_overflow(self):
import sys
big_int = int(sys.float_info.max) * 2
for t in float_types + [c_longdouble]:
self.assertRaises(OverflowError, t, big_int)
if hasattr(t, '__ctype_be__'):
self.assertRaises(OverflowError, t.__ctype_be__, big_int)
if hasattr(t, '__ctype_le__'):
self.assertRaises(OverflowError, t.__ctype_le__, big_int)
from ctypes import _SimpleCData
class c_int_S(_SimpleCData):
_type_ = 'i'
__slots__ = []
def run_test(rep, msg, func, arg = None):
items = range(rep)
from time import clock
if arg is not None:
start = clock()
for i in items:
func(arg)
func(arg)
func(arg)
func(arg)
func(arg)
stop = clock()
else:
start = clock()
for i in items:
func()
func()
func()
func()
func()
stop = clock()
print '%15s: %.2f us' % (msg, (stop - start) * 1000000.0 / 5 / rep)
return
def check_perf():
from ctypes import c_int
REP = 200000
run_test(REP, 'int()', int)
run_test(REP, 'int(999)', int)
run_test(REP, 'c_int()', c_int)
run_test(REP, 'c_int(999)', c_int)
run_test(REP, 'c_int_S()', c_int_S)
run_test(REP, 'c_int_S(999)', c_int_S)
if __name__ == '__main__':
unittest.main()
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\ctypes\test\test_numbers.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 12:02:44 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
356c3d0d2080ca4ad61b6ecd2046b5002212c549
|
0dd881b86146eff46a99e3100a12addcb5b1bde9
|
/weipinghui2019_zifuchuanxiangjia.py
|
9450013b2f20ba2c8819ba996bb4dcb91ebcc8c8
|
[] |
no_license
|
BaijingML/leetcode
|
8b04599ba6f1f9cf12fbb2726f6a1463a42f0a70
|
0ba37ea32ad71d9467f73da6f9e71971911f1d4c
|
refs/heads/master
| 2020-03-22T05:07:17.884441
| 2020-01-10T12:13:54
| 2020-01-10T12:13:54
| 138,399,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: python3.6
@Author : Zhangfusheng
@Time : 2019/8/18 12:04
@File : weipinghui2019_zifuchuanxiangjia
@Software: PyCharm
"""
if __name__ == "__main__":
s1, s2 = input(), input()
result = ""
add = 0
if len(s1) < len(s2):
s1, s2 = s2, s1
s1, s2 = s1[::-1], s2[::-1]
for index, i in enumerate(s1):
if index > len(s2) - 1:
b = 0
else:
b = int(s2[index])
result += str((int(s1[index]) + b + add) % 2)
if int(s1[index]) + b + add > 1:
add = 1
else:
add = 0
if add == 1:
result += str(1)
print(result[::-1])
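    # Illustrative trace (added): this performs binary string addition, with
    # `add` as the carry; e.g. inputs "101" and "11" give "1000" (5 + 3 = 8).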
|
[
"2670871693@qq.com"
] |
2670871693@qq.com
|
899115ede16865c53b02a851dff925b61a1bf92a
|
6eef7d400474384c9e36cafbbae95e3c34dbb6ad
|
/ben_kremer_clinvitae/urls.py
|
61103ac498270fed074b36b27f1ff25b7c810666
|
[] |
no_license
|
codeAligned/clinvitae
|
61d3c160e9dbc65d548818292681a27501d330ce
|
4a75c14113dc562991c7d2d1a5812d2db91e2da0
|
refs/heads/master
| 2020-05-17T12:02:33.514187
| 2019-02-21T06:47:35
| 2019-02-21T06:47:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('genomic_variants.urls')),
]
|
[
"kremerdesign@gmail.com"
] |
kremerdesign@gmail.com
|
25d1da88d1366f1ccb7cbe8670e06b566940541c
|
c62c9f5cb72e23d9ac35260d9c556b35ae1861e4
|
/collective/z3cform/html5widgets/widget_contenteditable.py
|
e3cdb08143c1fe06cda58e75623139d08489612d
|
[] |
no_license
|
collective/collective.z3cform.html5widgets
|
667cb567d1873cf0ca439df564df8c0cdf4ea6e6
|
3357495e8b445b5d75ccfc14608c55019b01bf6e
|
refs/heads/master
| 2023-03-22T16:39:43.686088
| 2013-12-05T17:01:54
| 2013-12-05T17:01:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
#-*- coding: utf-8 -*-
from zope import interface
import z3c.form.interfaces
import z3c.form.browser.widget
import z3c.form.widget
class IContentEditableWidget(z3c.form.interfaces.IWidget):
""" ContentEditable widget marker for z3c.form"""
class ContentEditableWidget(
z3c.form.browser.widget.HTMLTextInputWidget,
z3c.form.widget.Widget):
"""HTML widget contenteditable"""
interface.implementsOnly(IContentEditableWidget)
klass = u'html5-contenteditable-widget'
def update(self):
super(ContentEditableWidget, self).update()
z3c.form.browser.widget.addFieldClass(self)
def ContentEditableFieldWidget(field, request):
"""IFieldWidget factory for ContentEditableWidget."""
return z3c.form.widget.FieldWidget(field, ContentEditableWidget(request))
|
[
"toutpt@gmail.com"
] |
toutpt@gmail.com
|
2bdb1b2385181fd239dace5a48e5ffa805a4bd4e
|
3c582a006b945cd95974d910ab5b0ff551ab42fa
|
/tsuru_dashboard/auth/tests/test_change_password_form.py
|
7842f5c31e26fd2cb7f2eda76671938ed23b4712
|
[] |
no_license
|
tsuru/tsuru-dashboard
|
f8be15a72366a5cefeadd4a3aac117ed760e85bc
|
c94b0b1a6ec30d7f59b939adcff41646bad00e87
|
refs/heads/master
| 2023-06-22T12:01:20.024933
| 2022-10-20T19:50:47
| 2022-10-20T19:50:47
| 5,112,553
| 119
| 60
| null | 2023-06-13T17:53:35
| 2012-07-19T16:31:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
from django.test import TestCase
from django.forms import PasswordInput
from tsuru_dashboard.auth.forms import ChangePasswordForm
class ChangePasswordFormTest(TestCase):
def test_form_is_valid(self):
data = {
"old": "old",
"new": "new",
"confirm": "new",
}
form = ChangePasswordForm(data)
self.assertTrue(form.is_valid())
def test_old_is_required(self):
data = {
"new": "new",
"confirm": "new",
}
form = ChangePasswordForm(data)
self.assertFalse(form.is_valid())
def test_new_is_required(self):
data = {
"old": "old",
"confirm": "new",
}
form = ChangePasswordForm(data)
self.assertFalse(form.is_valid())
def test_confirm_is_required(self):
data = {
"old": "old",
"new": "new",
}
form = ChangePasswordForm(data)
self.assertFalse(form.is_valid())
def test_old_use_password_input(self):
old_field = ChangePasswordForm.base_fields['old']
self.assertIsInstance(old_field.widget, PasswordInput)
def test_new_use_password_input(self):
new_field = ChangePasswordForm.base_fields['new']
self.assertIsInstance(new_field.widget, PasswordInput)
def test_confirm_use_password_input(self):
confirm_field = ChangePasswordForm.base_fields['confirm']
self.assertIsInstance(confirm_field.widget, PasswordInput)
|
[
"andrewsmedina@gmail.com"
] |
andrewsmedina@gmail.com
|
18ec3b9176c0cadcb71400f69ac095ea871c5eee
|
b4339826d3def43a2553f0ac8d357ed393a8f471
|
/apps/operation/models.py
|
1447f0a0a33afa24c352bdb28c638b809f5acae7
|
[] |
no_license
|
buzzzzx/MultiUser_blog
|
e8c19537d29ab4d8bc97a2ca62703110adc9d683
|
1ff6c2c345051406b5862d902ca51939be755528
|
refs/heads/master
| 2021-08-08T11:47:06.666011
| 2017-11-10T08:51:28
| 2017-11-10T08:51:59
| 110,224,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
from django.db import models
from account.models import UserProfile
from blog.models import Post
# Create your models here.
class PostComment(models.Model):
    post = models.ForeignKey(Post, related_name='comments')  # accessible via post.comments.all()
    user = models.ForeignKey(UserProfile, related_name='blog_comments')  # all of a user's comments via user.blog_comments.all()
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ('-created',)
def __str__(self):
return 'Comment by {} on {}'.format(self.user.username, self.post)
|
[
"buzzzzx233@gmail.com"
] |
buzzzzx233@gmail.com
|
77916008cef97dbe592ed28bdeb1fc24ef507f5b
|
770801815a644df6de1d252799be520f69e467be
|
/dataResearch.py
|
6b536432e9bb4642b8725ba2d3387a16d122c71f
|
[] |
no_license
|
chutianwen/CapitalOneHackerthon
|
ad2b693694945ff56fa5e2ebf1c3a00dfec75439
|
5337b954b529c03c87816e8927cf1620a26e8a49
|
refs/heads/master
| 2021-05-07T13:37:39.336866
| 2017-11-05T17:03:44
| 2017-11-05T17:03:44
| 109,598,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,182
|
py
|
from collections import Counter
with open("./Dataset/merchant_list.txt") as f:
text = f.read()
merchant_names = text.split(",")
text = text.lower()
text = text.replace('.', ' <PERIOD> ')
text = text.replace(',', ' <COMMA> ')
text = text.replace('"', ' <QUOTATION_MARK> ')
text = text.replace(';', ' <SEMICOLON> ')
text = text.replace('!', ' <EXCLAMATION_MARK> ')
text = text.replace('?', ' <QUESTION_MARK> ')
text = text.replace('(', ' <LEFT_PAREN> ')
text = text.replace(')', ' <RIGHT_PAREN> ')
text = text.replace('--', ' <HYPHENS> ')
text = text.replace('?', ' <QUESTION_MARK> ')
# text = text.replace('\n', ' <NEW_LINE> ')
text = text.replace(':', ' <COLON> ')
text = text.replace('&', ' <AND> ')
text = text.replace('-', ' <DASH> ')
words = text.split()
word_cnt = Counter(words)
print(len(word_cnt))
print(word_cnt)
# trim out unrelated words
unrelated_words = {'<AND>', '<DASH>', 'of', 'the', 'and', 'pa'}
word_cnt_trimmed = {word: word_cnt[word] for word in word_cnt
if word not in unrelated_words and 3 <= word_cnt[word] < 35}
print("Size of trimmed word_cnt:{}".format(len(word_cnt_trimmed)))
print(word_cnt_trimmed)
top_words = sorted(word_cnt_trimmed, key=word_cnt_trimmed.get, reverse=True)
print(top_words)
merchant_names_category = []
for merchant_name in merchant_names:
merchant_name_ori = merchant_name
merchant_name = merchant_name.replace("\"", "")
merchant_name = merchant_name.replace(".", " ")
merchant_name_words = merchant_name.lower().split()
category = "other"
for word in top_words:
merchant_name_words = merchant_name.split()
if word in merchant_name_words:
category = word
break
merchant_names_category.append([merchant_name_ori, category])
merchant_names_category.sort(key=lambda x: x[1])
categories = set(map(lambda x:x[1], merchant_names_category))
print("Categories:", categories)
with open("./Dataset/MerchantName_Category.txt", 'w') as f2:
f2.writelines("{}\t{}\n".format("Merchant Name", "Category"))
for item in merchant_names_category:
f2.writelines("{}\t{}\n".format(item[0], item[1]))
condense_category = {'inn': 'travel', }
|
[
"tianwen.chu@fedcentric.com"
] |
tianwen.chu@fedcentric.com
|
45d2a65651be56165f6cfe5b28a341f127eb57a5
|
302442c32bacca6cde69184d3f2d7529361e4f3c
|
/cidtrsend-all/stage1-model/pytz/zoneinfo/America/Boa_Vista.py
|
297b01070fa499ceb5132271bc1a11bdd52ca2e8
|
[] |
no_license
|
fucknoob/WebSemantic
|
580b85563072b1c9cc1fc8755f4b09dda5a14b03
|
f2b4584a994e00e76caccce167eb04ea61afa3e0
|
refs/heads/master
| 2021-01-19T09:41:59.135927
| 2015-02-07T02:11:23
| 2015-02-07T02:11:23
| 30,441,659
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
'''tzinfo timezone information for America/Boa_Vista.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Boa_Vista(DstTzInfo):
'''America/Boa_Vista timezone definition. See datetime.tzinfo for details'''
zone = 'America/Boa_Vista'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1914,1,1,4,2,40),
d(1931,10,3,15,0,0),
d(1932,4,1,3,0,0),
d(1932,10,3,4,0,0),
d(1933,4,1,3,0,0),
d(1949,12,1,4,0,0),
d(1950,4,16,4,0,0),
d(1950,12,1,4,0,0),
d(1951,4,1,3,0,0),
d(1951,12,1,4,0,0),
d(1952,4,1,3,0,0),
d(1952,12,1,4,0,0),
d(1953,3,1,3,0,0),
d(1963,12,9,4,0,0),
d(1964,3,1,3,0,0),
d(1965,1,31,4,0,0),
d(1965,3,31,3,0,0),
d(1965,12,1,4,0,0),
d(1966,3,1,3,0,0),
d(1966,11,1,4,0,0),
d(1967,3,1,3,0,0),
d(1967,11,1,4,0,0),
d(1968,3,1,3,0,0),
d(1985,11,2,4,0,0),
d(1986,3,15,3,0,0),
d(1986,10,25,4,0,0),
d(1987,2,14,3,0,0),
d(1987,10,25,4,0,0),
d(1988,2,7,3,0,0),
d(1999,10,3,4,0,0),
d(2000,2,27,3,0,0),
d(2000,10,8,4,0,0),
d(2000,10,15,3,0,0),
]
_transition_info = [
i(-14580,0,'LMT'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
i(-10800,3600,'AMST'),
i(-14400,0,'AMT'),
]
Boa_Vista = Boa_Vista()
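# Note (added aside): entries in _utc_transition_times pair positionally with
# _transition_info; e.g. the 1914-01-01 transition moves the zone from
# LMT (-14580 s, i.e. UTC-4:03) to AMT (UTC-4:00), and later pairs toggle
# between AMT and AMST (a 3600 s daylight-saving offset).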
|
[
"learnfuzzy@gmail.com"
] |
learnfuzzy@gmail.com
|
663175d84618b1612f00e593024da938380a9840
|
98accbb4d8c8f972bfda31e12fabab7c3ca37533
|
/linux_lou_plus/step_7/multiprocess/process_sys.py
|
4d33b61e088ad55ac86b641f5138761583f1014d
|
[] |
no_license
|
qimanchen/interview_plan
|
49e5a323f35b8b3496d5dc4baba0f12a1b2c2a13
|
6a11a1927a14ce3fc439149e907a3febbee446a7
|
refs/heads/master
| 2022-12-10T05:25:54.927847
| 2020-06-13T02:54:25
| 2020-06-13T02:54:25
| 194,584,004
| 0
| 0
| null | 2022-12-08T05:25:13
| 2019-07-01T02:08:27
|
Python
|
UTF-8
|
Python
| false
| false
| 555
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from multiprocessing import Process, Value, Lock
def func(val, lock):
for i in range(50):
time.sleep(0.01)
with lock:
val.value += 1
if __name__ == "__main__":
    # Global variables cannot be shared across processes; the Value proxy
    # provided by multiprocessing makes this variable shareable between them
v = Value('i', 0)
lock = Lock()
procs = [Process(target=func, args=(v, lock)) for i in range(10)]
for p in procs:
p.start()
for p in procs:
p.join()
print(v.value)
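    # Note (added aside): without the Lock, `val.value += 1` is a non-atomic
    # read-modify-write, so increments from the 10 processes could interleave
    # and the final count may fall short of 500.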
|
[
"1033178199@qq.com"
] |
1033178199@qq.com
|
0b01324db38b6537c084e6e7d7954b7f84f49fa4
|
45d1478e29cdc173085e9fe2ae092f4c71ea19f2
|
/retinaface/modeling/__init__.py
|
e09b09632129673b7c4c911168f1f58126455bdd
|
[
"MIT"
] |
permissive
|
OxYunzhe/RetinaFace.detectron2
|
0c5f8659d17fb2e46c194dc0fcbbac7732cd54ab
|
3edf1c2d539763115741819bcf16816d7c2e4c91
|
refs/heads/master
| 2023-03-16T07:50:51.572555
| 2020-06-10T09:41:29
| 2020-06-10T09:41:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
'''
@Copyright (c) tkianai All Rights Reserved.
@Author : tkianai
@Github : https://github.com/tkianai
@Date : 2020-04-21 13:11:05
@FilePath : /RetinaFace.detectron2/retinaface/modeling/__init__.py
@Description :
'''
|
[
"tkianai@163.com"
] |
tkianai@163.com
|
264ccf1d747e420a00b8a22c7b1db3529c65867c
|
a8d68074db5c2b2697650ed0281979d3e00cf5a8
|
/python-spider/shuaia.py
|
4f3d70d1d53400532bacaf55f467f8d4bb664164
|
[] |
no_license
|
15807857476/bogdata-2
|
9595609ea2ae5ae0a48c511f911df2498456467e
|
1934cdfa234b77ca91e349b84688db113ff39e8c
|
refs/heads/master
| 2023-05-26T19:10:18.439269
| 2019-05-24T02:50:41
| 2019-05-24T02:50:41
| 188,327,526
| 3
| 1
| null | 2023-05-22T21:37:27
| 2019-05-24T00:53:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
# -*- coding:UTF-8 -*-
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import requests
import os
import time
if __name__ == '__main__':
list_url = []
for num in range(1,3):
if num == 1:
url = 'http://www.shuaia.net/index.html'
else:
url = 'http://www.shuaia.net/index_%d.html' % num
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
}
req = requests.get(url = url,headers = headers)
req.encoding = 'utf-8'
html = req.text
bf = BeautifulSoup(html, 'lxml')
targets_url = bf.find_all(class_='item-img')
for each in targets_url:
list_url.append(each.img.get('alt') + '=' + each.get('href'))
    print('Link collection complete')
for each_img in list_url:
img_info = each_img.split('=')
target_url = img_info[1]
filename = img_info[0] + '.jpg'
        print('Downloading: ' + filename)
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
}
img_req = requests.get(url = target_url,headers = headers)
img_req.encoding = 'utf-8'
img_html = img_req.text
img_bf_1 = BeautifulSoup(img_html, 'lxml')
img_url = img_bf_1.find_all('div', class_='wr-single-content-list')
img_bf_2 = BeautifulSoup(str(img_url), 'lxml')
img_url = 'http://www.shuaia.net' + img_bf_2.div.img.get('src')
if 'images' not in os.listdir():
os.makedirs('images')
urlretrieve(url = img_url,filename = 'images/' + filename)
time.sleep(1)
    print('Download complete!')
|
[
"2397955090@qq.com"
] |
2397955090@qq.com
|
27662d158b11bc9bb0943ec4d1b442d23925248b
|
b75fa0885bc3ba3f153225fd3396aadef6c1f97e
|
/slides/pypyjs/lib-py3k/modules/_functools.py
|
e51a4aa7f3b13c9b4d6cc9168e73ea720b598012
|
[
"MIT"
] |
permissive
|
rfk/talk-pypyjs-what-how-why
|
e084303185167dbc9b704c3568e0c31d0a1f6885
|
1ab62ee32ff9495ae9313ec81e8ee2044212ea71
|
refs/heads/master
| 2016-09-06T05:27:09.800382
| 2015-04-10T03:12:07
| 2015-04-10T03:12:07
| 22,421,369
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,980
|
py
|
""" Supplies the internal functions for functools.py in the standard library """
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
sentinel = object()
@builtinify
def reduce(func, sequence, initial=sentinel):
"""reduce(function, sequence[, initial]) -> value
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
((((1+2)+3)+4)+5). If initial is present, it is placed before the items
of the sequence in the calculation, and serves as a default when the
sequence is empty."""
iterator = iter(sequence)
if initial is sentinel:
try:
initial = next(iterator)
except StopIteration:
raise TypeError("reduce() of empty sequence with no initial value")
result = initial
for item in iterator:
result = func(result, item)
return result
class partial(object):
"""
partial(func, *args, **keywords) - new function with partial application
of the given arguments and keywords.
"""
def __init__(self, *args, **keywords):
if not args:
raise TypeError('__init__() takes at least 2 arguments (1 given)')
func, args = args[0], args[1:]
if not callable(func):
raise TypeError("the first argument must be callable")
self._func = func
self._args = args
self._keywords = keywords or None
def __delattr__(self, key):
if key == '__dict__':
raise TypeError("a partial object's dictionary may not be deleted")
object.__delattr__(self, key)
@property
def func(self):
return self._func
@property
def args(self):
return self._args
@property
def keywords(self):
return self._keywords
def __call__(self, *fargs, **fkeywords):
if self.keywords is not None:
fkeywords = dict(self.keywords, **fkeywords)
return self.func(*(self.args + fargs), **fkeywords)
def __repr__(self):
cls = type(self)
if cls is partial:
name = 'functools.partial'
else:
name = cls.__name__
tmp = [repr(self.func)]
for arg in self.args:
tmp.append(repr(arg))
if self.keywords:
for k, v in self.keywords.items():
tmp.append("{}={!r}".format(k, v))
return "{}({})".format(name, ', '.join(tmp))
def __reduce__(self):
d = dict((k, v) for k, v in self.__dict__.items() if k not in
('_func', '_args', '_keywords'))
if len(d) == 0:
d = None
return (type(self), (self.func,),
(self.func, self.args, self.keywords, d))
def __setstate__(self, state):
self._func, self._args, self._keywords, d = state
if d is not None:
self.__dict__.update(d)
|
[
"ryan@rfk.id.au"
] |
ryan@rfk.id.au
|
5bcff5cffd28731828328a79715cde3f608f37b3
|
9fefd87bf65dd0be051988ead6fa532ad968371c
|
/01_MathBasic/ex15.py
|
574694f9302e6bc50f1d3caf72765a3d1df5dfee
|
[] |
no_license
|
et0511/linear-algebra-basics
|
b94659832dd16342bdc73bace18ab21d1fc3458d
|
b9f3a91ea9ba5b1011d619d374f238dd56c09c9a
|
refs/heads/master
| 2023-01-22T05:35:56.850516
| 2020-11-20T08:39:14
| 2020-11-20T08:39:14
| 313,239,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
# Matrix arithmetic: multiplication
import numpy as np
m1 = np.array([
[1, 2, 3],
[4, 5, 6]
])
m2 = np.array([
[10, 20, 30],
[40, 50, 60]
])
m3 = m1 * m2
print(m3)
m4 = np.multiply(m1, m2)
print(m4)
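# Note (added aside): `*` and np.multiply are element-wise, so m3 == m4 ==
# [[ 10  40  90], [160 250 360]]; a true matrix product needs compatible
# shapes, e.g. m1 @ m2.T (2x3 @ 3x2):
print(m1 @ m2.T)  # [[140 320], [320 770]]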
|
[
"shinwon0511@gmail.com"
] |
shinwon0511@gmail.com
|
80a7bbe98fb1db2e7affb126f6379fbca8deaffa
|
0be27c0a583d3a8edd5d136c091e74a3df51b526
|
/pro_6.py
|
cd9f08e7650bc6d5eabf300b5b77d29d6bc94b75
|
[] |
no_license
|
ssangitha/guvicode
|
3d38942f5d5e27a7978e070e14be07a5269b01fe
|
ea960fb056cfe577eec81e83841929e41a31f72e
|
refs/heads/master
| 2020-04-15T05:01:00.226391
| 2019-09-06T10:08:23
| 2019-09-06T10:08:23
| 164,405,935
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
n=int(input())
l=list(map(int,input().split()))
c=0
for i in range(len(l)-2):
for j in range(i+1,len(l)-1):
for k in range(j+1,len(l)):
if l[i]<l[j]<l[k]:
c=c+1
print(c)
# counts the number of increasing triplets (i < j < k with l[i] < l[j] < l[k])
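# Illustrative check (added): brute force over all i < j < k is O(n^3);
# e.g. for l = [1, 2, 3, 4] every choice of three indices is increasing,
# so c == 4 (the binomial coefficient C(4, 3)).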
|
[
"noreply@github.com"
] |
ssangitha.noreply@github.com
|
f46db137e5ffc06d2d219a96623c75d5281f13d1
|
49986759cb09afe8888e87cb5d3d02defedf7fcf
|
/examples/openpyxl/openpyxl__loadedFiles_example.py
|
43f14fe2d32458d4bd9f8b34649c22fd76ea9b62
|
[] |
no_license
|
Ze1598/Python_stuff
|
ca7e8a85ab693efb1909b1aaf3075906419ab43b
|
df4d9b85eeff4e14c91533135a347b59d52812c7
|
refs/heads/master
| 2023-04-10T16:50:37.719570
| 2023-03-26T15:06:31
| 2023-03-26T15:06:31
| 117,886,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,790
|
py
|
'''
File that goes over basic operations with the openpyxl module: a
module for working with Excel files (.xlsx) and LibreOffice Calc
(.xls), that is, workbook files.
This file goes over the basic operations performed with these files,
without touching any writing to the files.
'''
import openpyxl
# Used to pretty-print the information written to the output file
import pprint
# Input
# -----------------------------------------------------------------
# Open (load) a workbook file (we'll use an Excel file in this\
# case)
wb = openpyxl.load_workbook("openpyxl_sample.xlsx")
# -----------------------------------------------------------------
# Get sheets
# -----------------------------------------------------------------
# Get a list with the names of each sheet in the file
wb_sheets = wb.get_sheet_names()
# Get the sheet with name "Sheet1"
single_sheet = wb.get_sheet_by_name("Sheet1")
# Print the title of the sheet
print(single_sheet.title)
# Gets the sheet currently open in the file
active_sheet = wb.active
print(active_sheet)
print()
# -----------------------------------------------------------------
# Get cells
# -----------------------------------------------------------------
# Print the cell located at A1 in the "Sheet1" sheet
a1_cell = single_sheet["A1"]
print(a1_cell)
# Actually print the value saved in the cell (properly converted to a\
# Python datatype)
print(a1_cell.value)
# Print the coordinates of the A1 cell
print(f"`a1_cell` is located at row {a1_cell.row} and column {a1_cell.column}, that is, {a1_cell.coordinate}.")
# Print the value of another cell (cell located at B1)
# This time we access a cell by calling the `cell()` method and pass\
# the desired coordinate as row and column values
print(single_sheet.cell(row=1, column=2).value)
print()
# -----------------------------------------------------------------
# Access rows and columns
# -----------------------------------------------------------------
# Print the values saved in all the rows of the second column (B)
print("Values saved in the second column:")
# Loop through all the rows in the second column and print the values found
# Use the `max_row` attribute to find the index of the last row in this sheet
for i in range(1, single_sheet.max_row+1):
print(f"Row {i}, Value {single_sheet.cell(row=i, column=2).value}")
print()
# Print the number of columns found in the current sheet by using the\
# `max_column` attribute
print(f"{single_sheet.title} has {single_sheet.max_column} columns.")
print()
# Extract the first three rows, including columns A through C
extract_rows = tuple(single_sheet["A1":"C3"])
# Each item corresponds to a single row, that is, a single item contains\
# all the cells of that row
print(extract_rows)
print("Loop through rows 1 through 3, including columns A through C.")
for row_of_cell_objs in extract_rows:
for cell_obj in row_of_cell_objs:
print(cell_obj.coordinate, cell_obj.value)
print('--- END OF ROW ---')
print()
# We can loop through all the cells in a given row using dictionary-like syntax
# Loop through the cells in the first row
for cell_obj in single_sheet[1]:
print(cell_obj.value)
print()
# -----------------------------------------------------------------
# Convert between integer and alphabetic representation of columns
# -----------------------------------------------------------------
# Because a workbook can have many columns, when it reaches the 27th\
# it needs to start using two letters to represent the column. Thus, we\
# can use the `get_column_letter()` method to input the integer representation\
# of a column and get the alphabetic representation returned
print("get_column_letter(27) =>", openpyxl.utils.get_column_letter(27))
print("get_column_letter(900) =>", openpyxl.utils.get_column_letter(900))
# The inverse operation, that is, getting the integer representation given the\
# alphabetic counterpart, is done via the `column_index_from_string()` method
print("column_index_from_string(AA) =>", openpyxl.utils.column_index_from_string("AA"))
print("column_index_from_string(AHP) =>", openpyxl.utils.column_index_from_string("AHP"))
print()
# -----------------------------------------------------------------
# Load an Excel file, extract data and save it in a new Python file
# -----------------------------------------------------------------
wb = openpyxl.load_workbook("censuspopdata.xlsx")
sheet = wb.get_sheet_by_name("Population by Census Tract")
# Dictionary to hold the extracted data in the format:
# county_data[state][county]["tracts"]
# county_data[state][county]["pop"]
county_data = {}
# Loop through all the rows in the file
for row in range(2, sheet.max_row+1):
# Get the state, county and population count for the current row
state = sheet["B"+str(row)].value
county = sheet["C"+str(row)].value
pop = sheet["D"+str(row)].value
# To make sure a key for the current state exists in the dictionary,\
# create the key-value pair `county_data[state] = {}`
county_data.setdefault(state, {})
# Create default values as well for the values of the current `state`\
# key, so that the `state` key holds a dictionary of the type:
# county_data[state][county]["tracts"]
# county_data[state][county]["pop"]
county_data[state].setdefault( county, {"tracts":0, "pop":0} )
# Since each row represents a census tract, increment the `tracts` key
county_data[state][county]["tracts"] += 1
# While we are in the same row, that is, the same county, add up the\
# population amounts found
county_data[state][county]["pop"] += int(pop)
# Now write the extracted data to a Python file
with open("openpyxl_sample_output_file.py", "w") as f:
f.write("all_data = " + pprint.pformat(county_data))
# -----------------------------------------------------------------
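# Illustrative shape (added; the numbers below are hypothetical):
#   county_data == {'AK': {'Anchorage': {'tracts': 55, 'pop': 291826}, ...},
#                   'AL': {...}, ...}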
|
[
"jose.fernando.costa.1998@gmailcom"
] |
jose.fernando.costa.1998@gmailcom
|
d64351067c60d186c1b36d2c2daa144f70f80fb5
|
9d931ad4bb1ee0806a0b1012cf551d77199416ae
|
/isobar/pattern/harmony.py
|
b1d8ece983fa44e188235e02acd85ed1e43070e7
|
[
"MIT"
] |
permissive
|
EnigmaCurry/isobar
|
9d8a92c44f9ba7e0eb8b8527fdb1a61691b85fe4
|
05de0d105984b642eeaca3286abf08e02e309362
|
refs/heads/master
| 2022-05-24T23:07:35.859526
| 2020-04-28T23:05:08
| 2020-04-28T23:05:08
| 259,672,137
| 1
| 0
|
MIT
| 2020-04-28T15:11:04
| 2020-04-28T15:11:02
| null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
from isobar.pattern.core import *
class PFilterByKey(Pattern):
def __init__(self, input, key):
self.input = input
self.key = key
def __next__(self):
note = next(self.input)
key = Pattern.value(self.key)
if note in key:
return note
else:
return None
class PNearest(Pattern):
def __init__(self, input, key):
self.input = input
self.key = key
def __next__(self):
note = next(self.input)
key = Pattern.value(self.key)
return key.nearest_note(note)
|
[
"daniel@jones.org.uk"
] |
daniel@jones.org.uk
|
6f3ef8ad3d317b58bdc8a750c23661686421a08f
|
810b7b2bb5829bf9ce0d921395ad6ca22563915c
|
/question-type-fine-num-classifier-builder.py
|
19e0daeeb9bccff0320b77706b209ea2dbd8f908
|
[] |
no_license
|
daksh-ddt/QuestionTypeClassifier
|
5f1ee8e8f017fbe4836e24f943e92dd14ecaebe5
|
33cffbefe1869612f8d39c83bb3e72602060893d
|
refs/heads/master
| 2020-12-25T03:01:05.344465
| 2014-03-22T16:01:24
| 2014-03-22T16:01:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,500
|
py
|
#!/usr/bin/env python
"""
Best score: 0.928
Best parameters set:
clf__alpha: 1e-06
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__norm: 'l1'
tfidf__use_idf: True
vect__max_df: 0.75
vect__max_features: 5000
vect__ngram_range: (1, 2)
vect__stop_words: None
"""
__author__ = 'gavin hackeling'
__email__ = 'gavinhackeling@gmail.com'
import os
from time import time
import pickle
from pprint import pprint
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
def grid_search():
os.chdir('/home/gavin/PycharmProjects/question-type-classifier/corpora/')
stop_words = [l.strip() for l in open('stop-words.txt', 'rb')]
categories = ['code', 'count', 'date', 'dist', 'money', 'ord', 'other', 'percent', 'period', 'speed', 'temp',
'volsize', 'weight']
train = load_files('fine/NUM', categories=categories, shuffle=True, random_state=42)
X, y = train.data, train.target
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
parameters = {
'vect__stop_words': ('english', stop_words, None),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
'tfidf__use_idf': (True, False),
'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
'clf__n_iter': (10, 50, 80),
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
t0 = time()
print 'Performing grid search...'
print 'pipeline:', [name for name, _ in pipeline.steps]
print 'parameters:'
pprint(parameters)
grid_search.fit(X, y)
print 'done in %0.3fs' % (time() - t0)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
def build_model():
os.chdir('/home/gavin/PycharmProjects/question-type-classifier/corpora/')
categories = ['code', 'count', 'date', 'dist', 'money', 'ord', 'other', 'percent', 'period', 'speed', 'temp',
'volsize', 'weight']
train = load_files('fine/NUM', categories=categories, shuffle=True, random_state=42)
X, y = train.data, train.target
pipeline = Pipeline([
('vect', CountVectorizer(max_df=0.75, ngram_range=(1, 2), stop_words=None)),
('tfidf', TfidfTransformer(norm='l2', use_idf=False)),
('clf', SGDClassifier(n_iter=80, penalty='elasticnet', alpha=0.0001)),
])
X_train, X_test, y_train, y_test = train_test_split(train.data, train.target, test_size=0.25, random_state=42)
pipeline.fit(X_train, y_train)
print 'classifier score:', pipeline.score(X_test, y_test)
pipeline.fit(X, y)
filehandler = open('fine-num-classifier.p', 'wb')
pickle.dump(pipeline, filehandler)
filehandler.close()
if __name__ == '__main__':
grid_search()
#build_model()
|
[
"gavinhackeling@gmail.com"
] |
gavinhackeling@gmail.com
|
2a5efff102c4a2919f73211d409750b6f210d7c8
|
8aa9ecfe421b196589b6c9fdc0e954d02d927feb
|
/sphinx/source/docs/user_guide/examples/data_linked_brushing_subsets.py
|
49c98cd13e1f65a5c2be6677d3b998058f107edb
|
[
"BSD-3-Clause"
] |
permissive
|
hongyu9000/bokeh
|
b384484925c6c145e4eaf87460a3f776095e81ed
|
b19f2c5547024bdc288d02e73fdb65e65991df5f
|
refs/heads/master
| 2020-09-03T15:57:31.157443
| 2019-11-04T05:25:46
| 2019-11-04T05:25:46
| 219,503,733
| 1
| 0
|
BSD-3-Clause
| 2019-11-04T13:06:20
| 2019-11-04T13:06:19
| null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource, CDSView, BooleanFilter
output_file("linked_selection_subsets.html")
x = list(range(-20, 21))
y0 = [abs(xx) for xx in x]
y1 = [xx**2 for xx in x]
# create a column data source for the plots to share
source = ColumnDataSource(data=dict(x=x, y0=y0, y1=y1))
# create a view of the source for one plot to use
view = CDSView(source=source, filters=[BooleanFilter([y > 250 or y < 100 for y in y1])])
TOOLS = "box_select,lasso_select,hover,help"
# create a new plot and add a renderer
left = figure(tools=TOOLS, plot_width=300, plot_height=300, title=None)
left.circle('x', 'y0', size=10, hover_color="firebrick", source=source)
# create another new plot, add a renderer that uses the view of the data source
right = figure(tools=TOOLS, plot_width=300, plot_height=300, title=None)
right.circle('x', 'y1', size=10, hover_color="firebrick", source=source, view=view)
p = gridplot([[left, right]])
show(p)
|
[
"bryanv@continuum.io"
] |
bryanv@continuum.io
|
b9ae2e9946a3611c2159abe0a20a624591d0eeca
|
01abb5fe2d6a51e8ee4330eaead043f4f9aad99d
|
/Repo_Files/Zips/plugin.video.streamhub/resources/lib/ssources/moviesplanet.py
|
60b053928b8fa5b276249a2f1673279da57f3e1c
|
[] |
no_license
|
MrAnhell/StreamHub
|
01bb97bd3ae385205f3c1ac6c0c883d70dd20b9f
|
e70f384abf23c83001152eae87c6897f2d3aef99
|
refs/heads/master
| 2021-01-18T23:25:48.119585
| 2017-09-06T12:39:41
| 2017-09-06T12:39:41
| 87,110,979
| 0
| 0
| null | 2017-04-03T19:09:49
| 2017-04-03T19:09:49
| null |
UTF-8
|
Python
| false
| false
| 7,208
|
py
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,base64,time
from resources.lib.smodules import control
from resources.lib.smodules import pyaes
from resources.lib.smodules import cleantitle
from resources.lib.smodules import client
from resources.lib.smodules import directstream
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['moviesplanet.is']
self.base_link = 'http://www.moviesplanet.is'
self.search_link = '/ajax/search.php'
self.user = control.setting('moviesplanet.user')
self.password = control.setting('moviesplanet.pass')
def movie(self, imdb, title, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
t = cleantitle.get(title)
h = {'X-Requested-With': 'XMLHttpRequest'}
u = urlparse.urljoin(self.base_link, self.search_link)
p = {'q': title.rsplit(':', 1)[0], 'limit': '10', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
p = urllib.urlencode(p)
r = client.request(u, post=p, headers=h)
r = json.loads(r)
r = [i for i in r if i['meta'].strip().split()[0].lower() == 'movie']
r = [i['permalink'] for i in r if t == cleantitle.get(i['title'])][:2]
r = [(i, urlparse.urljoin(self.base_link, i)) for i in r]
r = [(i[0], client.request(i[1])) for i in r]
r = [(i[0], i[1]) for i in r if not i[1] == None]
r = [(i[0], re.sub('\s|<.+?>|</.+?>', '', i[1])) for i in r]
r = [(i[0], re.findall('eleased:(\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0]) for i in r if i[1]]
r = [i for i in r if year in i[1]]
r = r[0][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
t = cleantitle.get(tvshowtitle)
h = {'X-Requested-With': 'XMLHttpRequest'}
u = urlparse.urljoin(self.base_link, self.search_link)
p = {'q': tvshowtitle.rsplit(':', 1)[0], 'limit': '10', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
p = urllib.urlencode(p)
r = client.request(u, post=p, headers=h)
r = json.loads(r)
r = [i for i in r if i['meta'].strip().split()[0].lower() == 'tv']
r = [i['permalink'] for i in r if t == cleantitle.get(i['title'])][:2]
r = [(i, urlparse.urljoin(self.base_link, i)) for i in r]
r = [(i[0], client.request(i[1])) for i in r]
r = [(i[0], i[1]) for i in r if not i[1] == None]
r = [(i[0], re.sub('\s|<.+?>|</.+?>', '', i[1])) for i in r]
r = [(i[0], re.findall('eleased:(\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0]) for i in r if i[1]]
r = [i for i in r if year in i[1]]
r = r[0][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if (self.user == '' or self.password == ''): raise Exception()
if url == None: return
url = '%s/season/%01d/episode/%01d' % (url, int(season), int(episode))
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def _gkdecrypt(self, key, str):
try:
key += (24 - len(key)) * '\0'
decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationECB(key))
str = decrypter.feed(str.decode('hex')) + decrypter.feed()
str = str.split('\0', 1)[0]
return str
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if (self.user == '' or self.password == ''): raise Exception()
headers = {'X-Requested-With': 'XMLHttpRequest'}
login = urlparse.urljoin(self.base_link, '/login')
post = {'username': self.user, 'password': self.password, 'action': 'login'}
post = urllib.urlencode(post)
cookie = client.request(login, post=post, headers=headers, output='cookie')
url = urlparse.urljoin(self.base_link, url)
result = client.request(url, cookie=cookie)
url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
url = client.parseDOM(url, 'iframe', ret='src')[0]
url = url.replace('https://', 'http://')
links = []
try:
dec = re.findall('mplanet\*(.+)', url)[0]
dec = dec.rsplit('&')[0]
dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
dec = directstream.google(dec)
links += [(i['url'], i['quality'], 'gvideo') for i in dec]
except:
pass
result = client.request(url)
try:
url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)
for i in url:
try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
except: pass
except:
pass
try:
url = client.parseDOM(result, 'source', ret='src')
url += re.findall('src\s*:\s*\'(.*?)\'', result)
url = [i for i in url if '://' in i]
links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
except:
pass
for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
try:
url = client.request(url, output='geturl')
return url
except:
return
|
[
"mediahubiptv@gmail.com"
] |
mediahubiptv@gmail.com
|
7af29a7adb10691bd1d3489aa0ba317b07c05725
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03433/s436709756.py
|
e163d74f4c2ebdcbfc9df05eb5845a02f71786d2
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
import sys
def input(): return sys.stdin.readline().strip()
def resolve():
def main():
n=int(input())
a=int(input())
for i in range(21):
for j in range(a+1):
if 500*i+1*j==n:
return 'Yes'
return 'No'
print(main())
resolve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
977adbcee4f7e66c212151bd3cd2debce6e3a296
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/datamgr/metadata/metadata/service/cli/sync.py
|
c2b92b897f7fd23b10f29fadae1e110bf5e28230
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,562
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import click
from tinyrpc import RPCClient
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.http import HttpPostClientTransport
from metadata.runtime import rt_context
from metadata.util.i18n import lazy_selfish as _
module_logger = logging.getLogger(__name__)
@click.command()
@click.option('--min_n', type=int, help=_('Min db operate number.'))
@click.option('--max_n', type=int, help=_('Max db operate number.'))
def replay_db_operate_log(min_n, max_n):
normal_conf = rt_context.config_collection.normal_config
rpc_client = RPCClient(
JSONRPCProtocol(),
HttpPostClientTransport(
'http://{}:{}/jsonrpc/2.0/'.format(normal_conf.ACCESS_RPC_SERVER_HOST, normal_conf.ACCESS_RPC_SERVER_PORT)
),
)
for i in range(min_n, max_n + 1):
print(i)
try:
rpc_client.call(
'bridge_sync',
[],
{"content_mode": "id", "db_operations_list": [i], "batch": False, "rpc_extra": {"language": "zh-hans"}},
)
except Exception:
            module_logger.exception('Failed to replay.')
|
[
"terrencehan@tencent.com"
] |
terrencehan@tencent.com
|
711ef1d902fdb1f8db2c88e55e3da7671dffe1c3
|
c974cf94626d04a83f3d5ccb25e06a99df537e21
|
/python/ray/rllib/dqn/models.py
|
c3f4a08eb943e954dbe72a3c3e31f0a9cde49a0b
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
vtpp2014/ray
|
00db3465812eb85890351dd345c43d2ed29745b8
|
4e4a4e4e062d37f3fb1c518ea5b0d0d7a32e5a60
|
refs/heads/master
| 2021-01-21T09:20:47.322556
| 2017-08-31T06:40:46
| 2017-08-31T06:40:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,153
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.layers as layers
from ray.rllib.models import ModelCatalog
def _build_q_network(inputs, num_actions, config):
dueling = config["dueling"]
hiddens = config["hiddens"]
frontend = ModelCatalog.get_model(inputs, 1, config["model_config"])
frontend_out = frontend.last_layer
with tf.variable_scope("action_value"):
action_out = frontend_out
for hidden in hiddens:
action_out = layers.fully_connected(
action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
action_scores = layers.fully_connected(
action_out, num_outputs=num_actions, activation_fn=None)
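    # Dueling architecture (Wang et al., 2016): combine a state-value stream
    # V(s) with the advantage stream A(s, a) as
    #   Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
    # which keeps V and A identifiable; the branch below implements this.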
if dueling:
with tf.variable_scope("state_value"):
state_out = frontend_out
for hidden in hiddens:
state_out = layers.fully_connected(
state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
state_score = layers.fully_connected(
state_out, num_outputs=1, activation_fn=None)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores_centered = action_scores - tf.expand_dims(
action_scores_mean, 1)
return state_score + action_scores_centered
else:
return action_scores
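# Epsilon-greedy action selection: with probability `eps` a uniformly random
# action is taken, otherwise the greedy argmax action; the `stochastic` flag
# disables exploration entirely.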
def _build_action_network(
q_values, observations, num_actions, stochastic, eps):
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations)[0]
random_actions = tf.random_uniform(
tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(
tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(
chose_random, random_actions, deterministic_actions)
return tf.cond(
stochastic, lambda: stochastic_actions,
lambda: deterministic_actions)
def _huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta))
def _minimize_and_clip(optimizer, objective, var_list, clip_val=10):
"""Minimized `objective` using `optimizer` w.r.t. variables in
`var_list` while ensure the norm of the gradients for each
variable is clipped to `clip_val`
"""
gradients = optimizer.compute_gradients(objective, var_list=var_list)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, clip_val), var)
return optimizer.apply_gradients(gradients)
def _scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as
trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES
if trainable_only else tf.GraphKeys.VARIABLES,
scope=scope if isinstance(scope, str) else scope.name)
class DQNGraph(object):
def __init__(self, env, config):
self.env = env
num_actions = env.action_space.n
optimizer = tf.train.AdamOptimizer(learning_rate=config["lr"])
# Action inputs
self.stochastic = tf.placeholder(tf.bool, (), name="stochastic")
self.eps = tf.placeholder(tf.float32, (), name="eps")
self.cur_observations = tf.placeholder(
tf.float32, shape=(None,) + env.observation_space.shape)
# Action Q network
with tf.variable_scope("q_func") as scope:
q_values = _build_q_network(
self.cur_observations, num_actions, config)
q_func_vars = _scope_vars(scope.name)
# Action outputs
self.output_actions = _build_action_network(
q_values,
self.cur_observations,
num_actions,
self.stochastic,
self.eps)
# Replay inputs
self.obs_t = tf.placeholder(
tf.float32, shape=(None,) + env.observation_space.shape)
self.act_t = tf.placeholder(tf.int32, [None], name="action")
self.rew_t = tf.placeholder(tf.float32, [None], name="reward")
self.obs_tp1 = tf.placeholder(
tf.float32, shape=(None,) + env.observation_space.shape)
self.done_mask = tf.placeholder(tf.float32, [None], name="done")
self.importance_weights = tf.placeholder(
tf.float32, [None], name="weight")
# q network evaluation
with tf.variable_scope("q_func", reuse=True):
self.q_t = _build_q_network(self.obs_t, num_actions, config)
        # target q network evaluation
with tf.variable_scope("target_q_func") as scope:
self.q_tp1 = _build_q_network(self.obs_tp1, num_actions, config)
target_q_func_vars = _scope_vars(scope.name)
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(
self.q_t * tf.one_hot(self.act_t, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if config["double_q"]:
with tf.variable_scope("q_func", reuse=True):
q_tp1_using_online_net = _build_q_network(
self.obs_tp1, num_actions, config)
q_tp1_best_using_online_net = tf.arg_max(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(
self.q_tp1 * tf.one_hot(
q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(self.q_tp1, 1)
q_tp1_best_masked = (1.0 - self.done_mask) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = self.rew_t + config["gamma"] * q_tp1_best_masked
# compute the error (potentially clipped)
self.td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = _huber_loss(self.td_error)
weighted_error = tf.reduce_mean(self.importance_weights * errors)
# compute optimization op (potentially with gradient clipping)
if config["grad_norm_clipping"] is not None:
self.optimize_expr = _minimize_and_clip(
optimizer, weighted_error, var_list=q_func_vars,
clip_val=config["grad_norm_clipping"])
else:
self.optimize_expr = optimizer.minimize(
weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to
# target Q network
update_target_expr = []
for var, var_target in zip(
sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
self.update_target_expr = tf.group(*update_target_expr)
def update_target(self, sess):
return sess.run(self.update_target_expr)
def act(self, sess, obs, eps, stochastic=True):
return sess.run(
self.output_actions,
feed_dict={
self.cur_observations: obs,
self.stochastic: stochastic,
self.eps: eps,
})
def train(
self, sess, obs_t, act_t, rew_t, obs_tp1, done_mask,
importance_weights):
td_err, _ = sess.run(
[self.td_error, self.optimize_expr],
feed_dict={
self.obs_t: obs_t,
self.act_t: act_t,
self.rew_t: rew_t,
self.obs_tp1: obs_tp1,
self.done_mask: done_mask,
self.importance_weights: importance_weights
})
return td_err
|
[
"pcmoritz@gmail.com"
] |
pcmoritz@gmail.com
|
ffd098afc7c7363e15f0181d9a02f89ec7709a69
|
a77802040fac0c1207902946f3a1e8a2d9a4fbd0
|
/examples/ccxt.pro/py/binance-reload-markets.py
|
50ad40b078a268a69c9fd85cc548d15e0af10bb8
|
[
"MIT"
] |
permissive
|
ndubel/ccxt
|
6eef84cddc9ac42db2fd03b9eaa04befd59775e6
|
982a12b1ab0f02a11911bee8a0aba8ad4f35ded1
|
refs/heads/master
| 2023-07-29T15:45:37.941234
| 2022-04-05T10:26:05
| 2022-04-05T10:26:05
| 331,591,556
| 1
| 0
|
MIT
| 2021-01-21T10:24:16
| 2021-01-21T10:24:15
| null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
import ccxtpro
from asyncio import get_event_loop, gather
print('CCXT Pro version', ccxtpro.__version__)
async def watch_order_book(exchange, symbol):
while True:
try:
orderbook = await exchange.watch_order_book(symbol)
datetime = exchange.iso8601(exchange.milliseconds())
print(datetime, orderbook['nonce'], symbol, orderbook['asks'][0], orderbook['bids'][0])
except Exception as e:
print(type(e).__name__, str(e))
break
async def reload_markets(exchange, delay):
while True:
try:
await exchange.sleep(delay)
markets = await exchange.load_markets(True)
datetime = exchange.iso8601(exchange.milliseconds())
print(datetime, 'Markets reloaded')
except Exception as e:
print(type(e).__name__, str(e))
break
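# The two coroutines above are designed to run concurrently (via gather() in
# main below): watch_order_book streams order book updates while
# reload_markets periodically calls load_markets(True) to force a refresh of
# the cached market metadata. Note that exchange.sleep() takes milliseconds,
# hence delay = 60000 for one minute.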
async def main(loop):
exchange = ccxtpro.binance({
'enableRateLimit': True,
'asyncio_loop': loop,
})
await exchange.load_markets()
# exchange.verbose = True
symbol = 'BTC/USDT'
delay = 60000 # every minute = 60 seconds = 60000 milliseconds
loops = [watch_order_book(exchange, symbol), reload_markets(exchange, delay)]
await gather(*loops)
await exchange.close()
loop = get_event_loop()
loop.run_until_complete(main(loop))
|
[
"igor.kroitor@gmail.com"
] |
igor.kroitor@gmail.com
|
ea01353fee3883e777cd572d6adebbe811a1ae98
|
8a61f0803e9d18772d867857a5a2a15e9549e6f5
|
/pnu/urls.py
|
6434f255de0516ae85606ecbb65c5bbecf72cc79
|
[] |
no_license
|
hjlee73/pnu-django-201904
|
94504935bf5bdb511d7b99c138e3ea3d9ac5f758
|
6e39a9b63525a8312968cabcc190f8661e1aa9e3
|
refs/heads/master
| 2020-05-20T18:19:55.389915
| 2019-04-12T08:22:12
| 2019-04-12T08:22:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
from django.contrib import admin
from django.urls import include, path
# FIXME: this code will be replaced by RedirectView.
from django.shortcuts import redirect
def root(request):
return redirect("/shop/")
urlpatterns = [
path('admin/', admin.site.urls),
path('shop/', include('shop.urls')),
path('', root),
]
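# A minimal sketch of the RedirectView replacement mentioned in the FIXME
# above (kept as a comment so the current root view stays in effect):
#
# from django.views.generic import RedirectView
#
# urlpatterns = [
#     path('admin/', admin.site.urls),
#     path('shop/', include('shop.urls')),
#     path('', RedirectView.as_view(url='/shop/')),
# ]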
|
[
"me@askcompany.kr"
] |
me@askcompany.kr
|
552213ea699ee5c5dbeb98cc9cf8b5eaf76bed64
|
cf6646983a0088248acfc5dafefd847a350bac94
|
/posts/views/admin.py
|
0e677adb3fb805b838df242ecf343a7ae63fbd7b
|
[
"MIT"
] |
permissive
|
Toxblh/vas3k.club
|
a370239e95b496f234cd6dd018881f1dcfa78d69
|
97fa3c815b25a6af789ba04892627f6c6c822113
|
refs/heads/master
| 2022-11-09T01:43:56.581340
| 2020-07-05T18:51:21
| 2020-07-05T18:51:21
| 277,580,310
| 1
| 0
|
MIT
| 2020-07-06T15:30:57
| 2020-07-06T15:30:56
| null |
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
from django.shortcuts import get_object_or_404, render
from auth.helpers import auth_required, moderator_role_required
from bot.common import render_html_message
from notifications.telegram.posts import announce_in_club_channel
from posts.admin import do_post_admin_actions
from posts.forms.admin import PostAdminForm, PostAnnounceForm
from posts.helpers import extract_any_image
from posts.models import Post
@auth_required
@moderator_role_required
def admin_post(request, post_slug):
post = get_object_or_404(Post, slug=post_slug)
if request.method == "POST":
form = PostAdminForm(request.POST)
if form.is_valid():
return do_post_admin_actions(request, post, form.cleaned_data)
else:
form = PostAdminForm()
return render(request, "admin/simple_form.html", {
"title": "Админить пост",
"post": post,
"form": form
})
@auth_required
@moderator_role_required
def announce_post(request, post_slug):
post = get_object_or_404(Post, slug=post_slug)
initial = {
"text": render_html_message("channel_post_announce.html", post=post),
"image": extract_any_image(post),
}
if request.method == "POST":
form = PostAnnounceForm(request.POST, initial=initial)
if form.is_valid():
announce_in_club_channel(
post=post,
announce_text=form.cleaned_data["text"],
image=form.cleaned_data["image"] if form.cleaned_data["with_image"] else None
)
return render(request, "message.html", {
"title": "Запощено ✅"
})
else:
form = PostAnnounceForm(initial=initial)
return render(request, "admin/simple_form.html", {
"title": "Анонсировать пост на канале",
"post": post,
"form": form
})
|
[
"me@vas3k.ru"
] |
me@vas3k.ru
|
048521fbf9c0d46993e2505ccc84c3e21302ed2a
|
ec65636f2f0183c43b1ec2eac343b9aa1fc7c459
|
/train/abnormal_detection_new/10.133.200.69/session_active.py
|
17249ec5279b8fc88290cc73764f849af4dd5cb5
|
[] |
no_license
|
tyroarchitect/AIOPs
|
db5441e5180fcace77b2d1022adb53bbd0b11f23
|
46fe93329a1847efa70e5b73bcbfd54469645cdd
|
refs/heads/master
| 2020-04-16T13:45:02.963404
| 2018-11-15T06:50:57
| 2018-11-15T06:51:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,203
|
py
|
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# settings of lstm model
timesteps = 20
batch_size = 64
epochs = 5
lstm_size = 30
lstm_layers = 2
filename = "../../../datasets/1-10.133.200.69_20181027_20181109.csv"
model = "../../../model/abnormal_detection_model_new/10.133.200.69/session_active_model/SESSION_ACTIVE_MODEL"
column = "SESSION_ACTIVE"
start = 224559
end = 241838
class NewData(object):
def __init__(self, filename, column, timesteps, start, end):
self.timesteps = timesteps
self.filename = filename
self.column = column
self.start = start
self.end = end
self.train_x, self.train_y, self.test_x, self.test_y = self.preprocess()
def MaxMinNormalization(self, x, max_value, min_value):
"""
:param x: data
:param max_value: max value in the data
:param min_value: min value in the data
:return: normalization data
"""
x = (x - min_value) / (max_value - min_value)
return x
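    # Worked example: with min_value=10 and max_value=30, a value of 20 maps
    # to (20 - 10) / (30 - 10) = 0.5.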
def generateGroupDataList(self, seq):
"""
:param seq: continuous sequence of value in data
:return: input data array and label data array in the format of numpy
"""
x = []
y = []
for i in range(len(seq) - self.timesteps):
x.append(seq[i: i + self.timesteps])
y.append(seq[i + self.timesteps])
return np.array(x, dtype=np.float32), np.array(y, dtype=np.float32)
def preprocess(self):
"""
:return: training data and testing data of given filename and column
"""
data = pd.read_csv(self.filename)
data = data["VALUE"].values.tolist()
data = data[self.start - 1:self.end]
data = self.MaxMinNormalization(data,
np.max(data, axis=0),
np.min(data, axis=0))
train_x, train_y = self.generateGroupDataList(data)
test_x, test_y = self.generateGroupDataList(data)
return train_x, train_y, test_x, test_y
def getBatches(self, x, y, batch_size):
for i in range(0, len(x), batch_size):
begin_i = i
end_i = i + batch_size if (i + batch_size) < len(x) else len(x)
yield x[begin_i:end_i], y[begin_i:end_i]
def initPlaceholder(timesteps):
x = tf.placeholder(tf.float32, [None, timesteps, 1], name='input_x')
y_ = tf.placeholder(tf.float32, [None, 1], name='input_y')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return x, y_, keep_prob
def lstm_model(x, lstm_size, lstm_layers, keep_prob):
# define basis structure LSTM cell
def lstm_cell():
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
# multi layer LSTM cell
cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(lstm_layers)])
# dynamic rnn
outputs, final_state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
    # take the output of the last timestep
    outputs = outputs[:, -1]
# fully connected
predictions = tf.contrib.layers.fully_connected(outputs, 1, activation_fn=tf.sigmoid)
return predictions
def train_model():
# prepare data
data = NewData(filename=filename, column=column, timesteps=timesteps, start=start, end=end)
# init placeholder
x, y, keep_prob = initPlaceholder(timesteps)
predictions = lstm_model(x,
lstm_size=lstm_size,
lstm_layers=lstm_layers,
keep_prob=keep_prob)
# mse loss function
cost = tf.losses.mean_squared_error(y, predictions)
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
tf.add_to_collection("predictions", predictions)
saver = tf.train.Saver()
# define session
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
tf.global_variables_initializer().run()
# batches counter
iteration = 1
# loop for training
for epoch in range(epochs):
for xs, ys in data.getBatches(data.train_x, data.train_y, batch_size):
feed_dict = {x: xs[:, :, None], y: ys[:, None], keep_prob: .5}
loss, train_step = sess.run([cost, optimizer], feed_dict=feed_dict)
if iteration % 100 == 0:
print('Epochs:{}/{}'.format(epoch, epochs),
'Iteration:{}'.format(iteration),
'Train loss: {}'.format(loss))
iteration += 1
        # save the model in checkpoint format to the configured path
saver.save(sess, model)
# test model
feed_dict = {x: data.test_x[:, :, None], keep_prob: 1.0}
results = sess.run(predictions, feed_dict=feed_dict)
plt.plot(results, 'r', label='predicted')
plt.plot(data.test_y, 'g--', label='real')
plt.legend()
plt.show()
if __name__ == "__main__":
train_model()
|
[
"873869027@qq.com"
] |
873869027@qq.com
|
899ddad0ccaa03f1cccb2693cbfe0916db0a2112
|
cd781c114deb0ee56fcd8e35df038397ebf8dc09
|
/Classes and Objects basics/Class.py
|
90dd512abd00251408ad11e23ac9c5e7d11cfc57
|
[] |
no_license
|
GBoshnakov/SoftUni-Fund
|
4549446c3bb355ff74c14d6071d968bde1886de5
|
de9318caaf072a82a9be8c3dd4e74212b8edd79e
|
refs/heads/master
| 2023-06-06T04:56:14.951452
| 2021-06-30T21:50:44
| 2021-06-30T21:50:44
| 381,817,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
class Class:
__students_count = 22
def __init__(self, name):
self.name = name
self.students = []
self.grades = []
def add_student(self, name, grade):
if Class.__students_count > len(self.students):
self.students.append(name)
self.grades.append(grade)
def get_average_grade(self):
return sum(self.grades) / len(self.grades)
def __repr__(self):
return f"The students in {self.name}: {', '.join(self.students)}. Average grade: {self.get_average_grade():.2f}"
a_class = Class("11B")
a_class.add_student("Peter", 4.80)
a_class.add_student("George", 6.00)
a_class.add_student("Amy", 3.50)
print(a_class)
|
[
"boshnakov.g@gmail.com"
] |
boshnakov.g@gmail.com
|
f2427f13f588cf7376778e89b011741ee33122e9
|
24eeb28433680606f9d1e099b19ec595552cf06b
|
/repo/plugin.video.shadow/resources/modules/mediaurl.py
|
e9b928533b79bd2f69b6f54703a5a709481a89e7
|
[] |
no_license
|
irmu/arda
|
d8ecdedc17bb01650b538dc9e00f438b6d0eed5a
|
9b7cab3656c2497c812ab101a56ed661dd8cf4a7
|
refs/heads/main
| 2023-09-01T08:48:02.823681
| 2023-08-29T18:11:28
| 2023-08-29T18:11:28
| 151,835,016
| 7
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
'''
Copyright (C) 2014-2016 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
#
#
import logging
#
class mediaurl:
# CloudService v0.2.4
##
##
def __init__(self, url, qualityDesc, quality, order, title=''):
self.url = url
self.qualityDesc = qualityDesc
self.quality = quality
self.order = order
self.title = title
self.offline = False
    def __repr__(self):
        return '{}: {}'.format(self.__class__.__name__, self.order)
def __cmp__(self, other):
if hasattr(other, 'order'):
return self.order.__cmp__(other.order)
def getKey(self):
return self.order
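    # getKey() mirrors the ordering used by __cmp__ above and suits
    # Python 3-style sorting; hypothetical usage (not part of the original
    # add-on): media_urls.sort(key=mediaurl.getKey)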
|
[
"zacharias.sama@gmail.com"
] |
zacharias.sama@gmail.com
|
7864ee4973a26fb92dac10a132763892fb308197
|
b54eb04ec2de1dec11a7143c6b5049a1d031ddaf
|
/test/baselines/bench/monitor.py
|
de942e56796664c57ce2c88a82de1e85341665de
|
[] |
no_license
|
Jerryxiaoyu/CR_CPG_RL
|
78c4c6e7539f08465b1f55125e04f982b1323bf2
|
69213cc48440ea66c42fbe3ace35163174686321
|
refs/heads/master
| 2020-03-28T12:11:51.491796
| 2018-09-14T04:32:33
| 2018-09-14T04:32:33
| 148,277,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,832
|
py
|
__all__ = ['Monitor', 'get_monitor_files', 'load_results']
import gym
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
import numpy as np
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename is None:
self.f = None
self.logger = None
else:
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
self.f.write('#%s\n'%json.dumps({"t_start": self.tstart, 'env_id' : env.spec and env.spec.id}))
self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+reset_keywords+info_keywords)
self.logger.writeheader()
self.f.flush()
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError('Expected you to pass kwarg %s into reset'%k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
info['episode'] = epinfo
self.total_steps += 1
return (ob, rew, done, info)
def close(self):
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
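# On-disk format produced by Monitor (see __init__ and step above): the file
# starts with one '#'-prefixed JSON line holding t_start and env_id, followed
# by CSV rows whose columns are r (episode reward), l (episode length) and
# t (seconds since t_start), plus any reset/info keywords.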
class LoadMonitorResultsError(Exception):
pass
def get_monitor_files(dir):
return glob(osp.join(dir, "*" + Monitor.EXT))
def load_results(dir):
import pandas
monitor_files = (
glob(osp.join(dir, "*monitor.json")) +
glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
if not monitor_files:
raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
dfs = []
headers = []
for fname in monitor_files:
with open(fname, 'rt') as fh:
if fname.endswith('csv'):
firstline = fh.readline()
assert firstline[0] == '#'
header = json.loads(firstline[1:])
df = pandas.read_csv(fh, index_col=None)
headers.append(header)
elif fname.endswith('json'): # Deprecated json format
episodes = []
lines = fh.readlines()
header = json.loads(lines[0])
headers.append(header)
for line in lines[1:]:
episode = json.loads(line)
episodes.append(episode)
df = pandas.DataFrame(episodes)
else:
assert 0, 'unreachable'
df['t'] += header['t_start']
dfs.append(df)
df = pandas.concat(dfs)
df.sort_values('t', inplace=True)
df.reset_index(inplace=True)
df['t'] -= min(header['t_start'] for header in headers)
df.headers = headers # HACK to preserve backwards compatibility
return df
def test_monitor():
import pandas
import os
import uuid
env = gym.make("CartPole-v1")
env.seed(0)
mon_file = "/tmp/baselines-test-%s.monitor.csv" % uuid.uuid4()
menv = Monitor(env, mon_file)
menv.reset()
for _ in range(1000):
_, _, done, _ = menv.step(0)
if done:
menv.reset()
f = open(mon_file, 'rt')
firstline = f.readline()
assert firstline.startswith('#')
metadata = json.loads(firstline[1:])
assert metadata['env_id'] == "CartPole-v1"
assert set(metadata.keys()) == {'env_id', 'gym_version', 't_start'}, "Incorrect keys in monitor metadata"
last_logline = pandas.read_csv(f, index_col=None)
assert set(last_logline.keys()) == {'l', 't', 'r'}, "Incorrect keys in monitor logline"
f.close()
os.remove(mon_file)
|
[
"drl@wjy.com"
] |
drl@wjy.com
|
2df22224aa226dc75cbe956bcad704c9efbeb719
|
d12b59b33df5c467abf081d48e043dac70cc5a9c
|
/uhd_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/dhcpv6serverglobals/dhcpv6serverglobals.py
|
321f4303de80c42f3b8c01d087612a4cd30a8aff
|
[
"MIT"
] |
permissive
|
ajbalogh/ixnetwork_restpy
|
59ce20b88c1f99f95a980ff01106bda8f4ad5a0f
|
60a107e84fd8c1a32e24500259738e11740069fd
|
refs/heads/master
| 2023-04-02T22:01:51.088515
| 2021-04-09T18:39:28
| 2021-04-09T18:39:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,767
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Dhcpv6ServerGlobals(Base):
"""Global settings placeholder for DHCPv6Server running over PPP/L2TP.
The Dhcpv6ServerGlobals class encapsulates a list of dhcpv6ServerGlobals resources that are managed by the user.
A list of resources can be retrieved from the server using the Dhcpv6ServerGlobals.find() method.
The list can be managed by using the Dhcpv6ServerGlobals.add() and Dhcpv6ServerGlobals.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'dhcpv6ServerGlobals'
_SDM_ATT_MAP = {
'DefaultLeaseTime': 'defaultLeaseTime',
'MaxLeaseTime': 'maxLeaseTime',
'ObjectId': 'objectId',
}
def __init__(self, parent):
super(Dhcpv6ServerGlobals, self).__init__(parent)
@property
def DefaultLeaseTime(self):
"""
Returns
-------
- number: The Life Time length in seconds that will be assigned to a lease if the requesting DHCP Client does not specify a specific expiration time.
"""
return self._get_attribute(self._SDM_ATT_MAP['DefaultLeaseTime'])
@DefaultLeaseTime.setter
def DefaultLeaseTime(self, value):
self._set_attribute(self._SDM_ATT_MAP['DefaultLeaseTime'], value)
@property
def MaxLeaseTime(self):
"""
Returns
-------
- number: The maximum Life Time length in seconds that will be assigned to a lease.
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxLeaseTime'])
@MaxLeaseTime.setter
def MaxLeaseTime(self, value):
self._set_attribute(self._SDM_ATT_MAP['MaxLeaseTime'], value)
@property
def ObjectId(self):
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
def update(self, DefaultLeaseTime=None, MaxLeaseTime=None):
"""Updates dhcpv6ServerGlobals resource on the server.
Args
----
- DefaultLeaseTime (number): The Life Time length in seconds that will be assigned to a lease if the requesting DHCP Client does not specify a specific expiration time.
- MaxLeaseTime (number): The maximum Life Time length in seconds that will be assigned to a lease.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, DefaultLeaseTime=None, MaxLeaseTime=None):
"""Adds a new dhcpv6ServerGlobals resource on the server and adds it to the container.
Args
----
- DefaultLeaseTime (number): The Life Time length in seconds that will be assigned to a lease if the requesting DHCP Client does not specify a specific expiration time.
- MaxLeaseTime (number): The maximum Life Time length in seconds that will be assigned to a lease.
Returns
-------
- self: This instance with all currently retrieved dhcpv6ServerGlobals resources using find and the newly added dhcpv6ServerGlobals resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained dhcpv6ServerGlobals resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, DefaultLeaseTime=None, MaxLeaseTime=None, ObjectId=None):
"""Finds and retrieves dhcpv6ServerGlobals resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dhcpv6ServerGlobals resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dhcpv6ServerGlobals resources from the server.
Args
----
- DefaultLeaseTime (number): The Life Time length in seconds that will be assigned to a lease if the requesting DHCP Client does not specify a specific expiration time.
- MaxLeaseTime (number): The maximum Life Time length in seconds that will be assigned to a lease.
- ObjectId (str): Unique identifier for this object
Returns
-------
- self: This instance with matching dhcpv6ServerGlobals resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dhcpv6ServerGlobals data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dhcpv6ServerGlobals resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
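# A hypothetical usage sketch (the parent traversal below is assumed, not
# taken from this file):
#
# globals_list = ixnetwork.Globals.ProtocolStack.Dhcpv6ServerGlobals.find()
# for g in globals_list:
#     print(g.DefaultLeaseTime, g.MaxLeaseTime)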
|
[
"andy.balogh@keysight.com"
] |
andy.balogh@keysight.com
|
10d43faffcf81b8dde6fa7ea3ea774c52a986c6b
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Numerical_Heat_Transfer_and_Fluid_Flow_Patankar/CFD_books_codes-master/Patnakar/2D_SIMPLE_algorithm.py
|
1ede47be2bee4e11dc15afb666b92bb83cea0fe6
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,324
|
py
|
import numpy as np
from matplotlib import pyplot as plt
# Peclet function scheme
def funcPeclet(P, n):
if n == 1:
# Central Difference
return 1 - 0.5*np.mod(P, 1)
if n == 2:
# Upwind
return 1
if n == 3:
# Hybrid
return max(0, 1 - (0.1 * pow(np.mod(P, 1), 1)))
if n == 4:
# Power law
return max(0, 1 - (0.1 * pow(np.mod(P, 1), 5)))
else:
# Return power law by default
return max(0, 1 - (0.1 * pow(np.mod(P, 1), 5)))
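# For reference, Patankar's standard A(|P|) forms (Table 5.2) are:
#   central difference: 1 - 0.5*|P|
#   upwind:             1
#   hybrid:             max(0, 1 - 0.5*|P|)
#   power law:          max(0, (1 - 0.1*|P|)**5)
# The implementation above substitutes np.mod(P, 1) for abs(P), so it departs
# from the textbook scheme whenever |P| >= 1.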
# Define the domain
x_len = 8
y_len = 8
x_points = 11
y_points = 11
del_x = x_len/float(x_points-1)
del_y = y_len/float(y_points-1)
x = np.arange(x_points+1)
y = np.arange(y_points+1)
f = 0.5
x_w = np.arange(x[1] - f, x[-2], 1)
x_e = np.arange(x[1] + f, x[-1], 1)
y_s = np.arange(y[1] - f, y[-2], 1)
y_n = np.arange(y[1] + f, y[-1], 1)
u = np.zeros((x_points-1, y_points-1))
v = np.zeros((x_points-1, y_points-1))
u_star = np.zeros((x_points-1, y_points-1))
v_star = np.zeros((x_points-1, y_points-1))
P = np.zeros((x_points, y_points))
P_star = np.zeros((x_points, y_points))
P_corr = np.zeros((x_points, y_points))
# Boundary conditions
u[0,:] = 10
v[:,0] = 11
P[0,:] = 20
P[-1,:] = 10
rho = 1
Sc = 50 # Linearization of source term
Sp = 0
Gamma = 1 # Assuming equal Gamma (diffusive coefficient) throughout the domain
n = 1 # Power scheme
alpha = 1 # Relaxation factor
n_itrs = 100
for itrs in range(n_itrs):
for i in range(1, x_points-2):
for j in range(1, y_points-2):
del_x_e = x[i + 1] - x[i]
del_x_w = x[i] - x[i - 1]
del_y_s = y[j] - y[j - 1]
del_y_n = y[j + 1] - y[j]
De, Dw = Gamma * del_y / float(del_x_e), Gamma * del_y / float(del_x_w)
Dn, Ds = Gamma * del_x / float(del_y_n), Gamma * del_x / float(del_y_s)
Dpe, Dpn = Gamma * del_y / float(del_x), Gamma * del_x / float(del_y)
Fe, Fw = rho * u[i+1,j] * del_y, rho * u[i-1,j] * del_y
Fn, Fs = rho * v[i,j+1] * del_x, rho * v[i,j-1] * del_x
Fpe, Fpn = rho * u[i,j] * del_y, rho * v[i,j] * del_x
Pe, Pw = Fe / float(De), Fw / float(Dw)
Pn, Ps = Fn / float(Dn), Fs / float(Ds)
Ppe, Ppn = Fpe / float(Dpe), Fpn / float(Dpn)
aE = De * funcPeclet(Pe, n) + max(-1 * Fe, 0)
aW = Dw * funcPeclet(Pw, n) + max(-1 * Fw, 0)
aN = Dn * funcPeclet(Pn, n) + max(-1 * Fn, 0)
aS = Ds * funcPeclet(Ps, n) + max(-1 * Fs, 0)
aP_e, aP_n = Dpe * funcPeclet(Ppe, n) + max(-1 * Fpe, 0), Dpn * funcPeclet(Ppn, n) + max(-1 * Fpn, 0)
b = Sc * del_x * del_y
u_star[i,j] = ((aE * u[i + 1, j] + aW * u[i - 1, j] + aN * v[i, j + 1] + aS * v[i, j - 1]) + b + (
P[i, j] - P[i + 1, j]) * del_y) / float(aP_e)
v_star[i,j] = ((aE * u[i + 1, j] + aW * u[i - 1, j] + aN * v[i, j + 1] + aS * v[i, j - 1]) + b + (
P[i, j] - P[i, j+1]) * del_x) / float(aP_n)
d_e = del_y/float(aP_e)
d_w = d_e
d_n = del_x/float(aP_n)
d_s = d_n
aE = rho * d_e * del_y
aW = rho * d_w * del_y
aN = rho * d_n * del_x
aS = rho * d_s * del_x
aP = aE + aW + aN + aS
b1 = rho * (u_star[i, j] - u_star[i + 1, j]) * del_y + rho * (v_star[i, j] - v_star[i, j + 1]) * del_x
P_corr[i,j] = (aE*P_corr[i+1, j] + aW*P_corr[i-1,j] + aN*P[i,j+1] + aS*P[i,j-1] + b1)/float(aP)
P[i,j] = P_star[i,j] + alpha*P_corr[i,j]
u[i, j] = u_star[i, j] + d_e * (P_corr[i, j] - P_corr[i + 1, j])
v[i, j] = v_star[i, j] + d_n * (P_corr[i, j] - P_corr[i, j + 1])
for i in range(0, x_points):
for j in range(0, y_points):
P_star[i,j] = P_corr[i,j]
print ("\n Pressure distribution is: \n" + str(P))
print ("\n The max pressure is: \t" + str(P.max()))
xx = np.linspace(0, x_len, x_points+1)
yy = np.linspace(0, y_len, y_points+1)
cmap = plt.pcolormesh(xx, yy, P) # https://scientific-python-101.readthedocs.io/matplotlib/pcolormesh_plots.html
plt.colorbar(cmap)
plt.show()
|
[
"me@yomama.com"
] |
me@yomama.com
|
5d73acd96034f8bcfb122c5a8fd790141f859aa3
|
50b2a447cccfbad70ca2f7d19d4550a708a74cbe
|
/blog/urls.py
|
6bfee92f9dce6c0387b338a007edade69a9decda
|
[] |
no_license
|
halsayed/portfolio_project
|
c07fa9f21f252d5cce130c78220ceefed2504675
|
c22c3d219b126b71951377285b18ce1b438a1b1d
|
refs/heads/master
| 2020-03-20T11:25:37.333806
| 2018-06-17T21:18:50
| 2018-06-17T21:18:50
| 137,402,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.allblogs, name='allblogs', ),
path('<int:blog_id>/', views.blogdetail, name='blogdetail')
]
|
[
"husain@alsayed.ws"
] |
husain@alsayed.ws
|
6c04ed9d151a7bd672fa94d24da5c68676b8669c
|
bbe7d6d59ef6d7364ff06377df9658367a19c425
|
/cogdominium/CogdoBarrelRoom.py
|
2bda0b18b835d5204d88476acf5c89a676ed85f0
|
[
"Apache-2.0"
] |
permissive
|
DedMemez/ODS-August-2017
|
1b45c912ad52ba81419c1596644d8db2a879bd9b
|
5d6214732e3245f63bfa250e3e9c881cc2dc28ad
|
refs/heads/master
| 2021-01-22T18:37:51.626942
| 2017-08-19T02:04:51
| 2017-08-19T02:04:51
| 100,762,513
| 0
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,736
|
py
|
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.cogdominium.CogdoBarrelRoom
from panda3d.core import Camera, Fog, Lens, Light, Point3, Vec3
import random
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals, ToontownTimer
from toontown.cogdominium import CogdoBarrelRoomConsts, CogdoBarrelRoomRewardPanel
from toontown.distributed import DelayDelete
class CogdoBarrelRoom:
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCogdoBarrelRoom')
def __init__(self):
self.timer = None
self.model = None
self._isLoaded = False
self.dummyElevInNode = None
self.cogdoBarrelsNode = None
self.entranceNode = None
self.nearBattleNode = None
self.rewardUi = None
self.rewardUiTaskName = 'CogdoBarrelRoom-RewardUI'
self.rewardCameraTaskName = 'CogdoBarrelRoom-RewardCamera'
self.fog = None
self.defaultFar = None
self.stomperSfx = None
return
def destroy(self):
self.unload()
def load(self):
if self._isLoaded:
return
self.timer = ToontownTimer.ToontownTimer()
self.timer.stash()
self.model = loader.loadModel(CogdoBarrelRoomConsts.BarrelRoomModel)
self.model.setPos(*CogdoBarrelRoomConsts.BarrelRoomModelPos)
self.model.reparentTo(render)
self.model.stash()
self.entranceNode = self.model.attachNewNode('door-entrance')
self.entranceNode.setPos(0, -65, 0)
self.nearBattleNode = self.model.attachNewNode('near-battle')
self.nearBattleNode.setPos(0, -25, 0)
self.rewardUi = CogdoBarrelRoomRewardPanel.CogdoBarrelRoomRewardPanel()
self.hideRewardUi()
self.stomperSfx = loader.loadSfx(CogdoBarrelRoomConsts.StomperSound)
self.fog = Fog('barrel-room-fog')
self.fog.setColor(CogdoBarrelRoomConsts.BarrelRoomFogColor)
self.fog.setLinearRange(*CogdoBarrelRoomConsts.BarrelRoomFogLinearRange)
self.brBarrel = render.attachNewNode('@@CogdoBarrels')
for i in xrange(len(CogdoBarrelRoomConsts.BarrelProps)):
self.bPath = self.brBarrel.attachNewNode('%s%s' % (CogdoBarrelRoomConsts.BarrelPathName, i))
self.bPath.setPos(CogdoBarrelRoomConsts.BarrelProps[i]['pos'])
self.bPath.setH(CogdoBarrelRoomConsts.BarrelProps[i]['heading'])
self._isLoaded = True
def unload(self):
if self.model:
self.model.removeNode()
self.model = None
if self.timer:
self.timer.destroy()
self.timer = None
if self.rewardUi:
self.rewardUi.destroy()
self.rewardUi = None
if hasattr(self, 'fog'):
if self.fog:
render.setFogOff()
del self.fog
taskMgr.remove(self.rewardUiTaskName)
taskMgr.remove(self.rewardCameraTaskName)
self._isLoaded = False
return
def isLoaded(self):
return self._isLoaded
def show(self):
if not self.cogdoBarrelsNode:
self.cogdoBarrelsNode = render.find('**/@@CogdoBarrels')
if not self.cogdoBarrelsNode.isEmpty():
self.cogdoBarrelsNode.reparentTo(self.model)
self.cogdoBarrelsNode.unstash()
base.localAvatar.b_setAnimState('neutral')
self.defaultFar = base.camLens.getFar()
base.camLens.setFar(CogdoBarrelRoomConsts.BarrelRoomCameraFar)
base.camLens.setMinFov(settings['fov'] / (4.0 / 3.0))
self.showBattleAreaLight(True)
render.setFog(self.fog)
self.model.unstash()
def hide(self):
self.model.stash()
if self.defaultFar is not None:
base.camLens.setFar(self.defaultFar)
return
def activate(self):
self.notify.info('Activating barrel room: %d sec timer.' % CogdoBarrelRoomConsts.CollectionTime)
self.timer.unstash()
self.timer.posAboveShtikerBook()
self.timer.countdown(CogdoBarrelRoomConsts.CollectionTime)
base.cr.playGame.getPlace().fsm.request('walk')
def deactivate(self):
self.notify.info('Deactivating barrel room.')
self.timer.stop()
self.timer.stash()
def placeToonsAtEntrance(self, toons):
for i in xrange(len(toons)):
toons[i].setPosHpr(self.entranceNode, *CogdoBarrelRoomConsts.BarrelRoomPlayerSpawnPoints[i])
def placeToonsNearBattle(self, toons):
for i in xrange(len(toons)):
toons[i].setPosHpr(self.nearBattleNode, *CogdoBarrelRoomConsts.BarrelRoomPlayerSpawnPoints[i])
def showBattleAreaLight(self, visible = True):
lightConeNode = self.model.find('**/battleCone')
if lightConeNode != None and not lightConeNode.isEmpty():
if visible:
lightConeNode.show()
else:
lightConeNode.hide()
return
def getIntroInterval(self):
avatar = base.localAvatar
trackName = '__introBarrelRoom-%d' % avatar.doId
track = Parallel(name=trackName)
track.append(self.__stomperIntervals())
track.append(Sequence(Func(camera.reparentTo, render), Func(camera.setPosHpr, self.model, -20.0, -87.9, 12.0, -30, 0, 0), Func(base.transitions.irisIn, 0.5), Wait(1.0), LerpHprInterval(camera, duration=2.0, startHpr=Vec3(-30, 0, 0), hpr=Vec3(0, 0, 0), blendType='easeInOut'), Wait(2.5), LerpHprInterval(camera, duration=3.0, startHpr=Vec3(0, 0, 0), hpr=Vec3(-45, 0, 0), blendType='easeInOut'), Wait(2.5)))
track.delayDelete = DelayDelete.DelayDelete(avatar, 'introBarrelRoomTrack')
track.setDoneEvent(trackName)
return (track, trackName)
def __stomperIntervals(self):
ivals = [SoundInterval(self.stomperSfx)]
i = 0
for stomperDef in CogdoBarrelRoomConsts.StomperProps:
stomperNode = render.find(stomperDef['path'])
if stomperNode:
maxZ = random.uniform(10, 20)
minZ = maxZ - 10
if stomperDef['motion'] == 'up':
startZ, destZ = minZ, maxZ
else:
startZ, destZ = maxZ, minZ
stomperNode.setPos(Point3(0, 0, startZ))
ivals.append(LerpPosInterval(stomperNode, CogdoBarrelRoomConsts.StomperHaltTime, Point3(0, 0, destZ), blendType='easeOut'))
i += 1
return Parallel(*tuple(ivals))
def __rewardUiTimeout(self, callback):
self.hideRewardUi()
if callback is not None:
callback()
return
def __rewardCamera(self):
trackName = 'cogdoBarrelRoom-RewardCamera'
track = Sequence(Func(camera.reparentTo, render), Func(camera.setPosHpr, self.model, 0, 0, 11.0, 0, -14, 0), Func(self.showBattleAreaLight, False), name=trackName)
return (track, trackName)
def showRewardUi(self, callback = None):
track, trackName = self.__rewardCamera()
if CogdoBarrelRoomConsts.ShowRewardUI:
self.rewardUi.setRewards()
self.rewardUi.unstash()
taskMgr.doMethodLater(CogdoBarrelRoomConsts.RewardUiTime, self.__rewardUiTimeout, self.rewardUiTaskName, extraArgs=[callback])
return (track, trackName)
def setRewardResults(self, results):
self.rewardUi.setRewards(results)
def hideRewardUi(self):
self.rewardUi.stash()
taskMgr.remove(self.rewardUiTaskName)
|
[
"noreply@github.com"
] |
DedMemez.noreply@github.com
|
46fe97177f3bbbce62cfb44bf9651cb61d005666
|
39d17148ac0517af1b0fdfc9e0f2d962183b7953
|
/dev/benchmarks/bench_check.py
|
d393c4fdb7662e18e06543bc2386b656eabe00af
|
[
"Apache-2.0"
] |
permissive
|
Erotemic/progiter
|
a784f69fbca990a53d7f5abd56527f56b720c7f1
|
2d8e1a90b2de59e74c21b5f026832a119aa840e1
|
refs/heads/main
| 2023-06-22T05:01:03.177353
| 2023-06-20T17:36:08
| 2023-06-20T17:36:08
| 134,924,299
| 15
| 2
|
Apache-2.0
| 2023-06-11T21:35:08
| 2018-05-26T02:59:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,753
|
py
|
import ubelt as ub
import progiter
import timerit
def basic_benchmark():
"""
Run the simplest benchmark where we iterate over nothing and compare the
slowdown of using a progress iterator versus doing nothing.
"""
N = 100_000
ti = timerit.Timerit(21, bestof=3, verbose=2)
for timer in ti.reset('baseline'):
for i in range(N):
...
# for timer in ti.reset('ubelt progiter'):
# for i in ub.ProgIter(range(N)):
# ...
for timer in ti.reset('progiter, enabled=False'):
for i in progiter.ProgIter(range(N), enabled=False):
...
for timer in ti.reset('progiter, homogeneous=True'):
for i in progiter.ProgIter(range(N), homogeneous=True):
...
for timer in ti.reset('progiter, homogeneous=auto'):
for i in progiter.ProgIter(range(N), homogeneous='auto'):
...
for timer in ti.reset('progiter, homogeneous=False'):
for i in progiter.ProgIter(range(N), homogeneous=False):
...
import tqdm
for timer in ti.reset('tqdm'):
for i in tqdm.tqdm(range(N)):
...
if 1:
from rich.live import Live
from rich.progress import Progress as richProgress
for timer in ti.reset('rich.progress'):
prog_manager = richProgress()
task_id = prog_manager.add_task(description='', total=N)
live_context = Live(prog_manager)
with live_context:
for i in range(N):
prog_manager.update(task_id, advance=1)
import pandas as pd
df = pd.DataFrame.from_dict(ti.rankings['mean'], orient='index', columns=['mean'])
df.loc[list(ti.rankings['min'].keys()), 'min'] = list(ti.rankings['min'].values())
df['mean_rel_overhead'] = df['mean'] / df.loc['baseline', 'mean']
df['min_rel_overhead'] = df['min'] / df.loc['baseline', 'min']
print(df.to_string())
def other_tests():
N = 100
###########
with ub.Timer(label='progiter fixed freq=10'):
for i in progiter.ProgIter(range(N), freq=10, adjust=False):
pass
with ub.Timer(label='ubelt fixed freq=10'):
for i in ub.ProgIter(range(N), freq=10, adjust=False):
pass
with ub.Timer(label='progiter fixed freq=1'):
for i in progiter.ProgIter(range(N), freq=1, adjust=False):
pass
with ub.Timer(label='ubelt fixed freq=1'):
for i in ub.ProgIter(range(N), freq=1, adjust=False):
pass
import timerit
import time
ti = timerit.Timerit(100000, bestof=10, verbose=2)
for timer in ti.reset('time.process_time()'):
with timer:
time.process_time()
for timer in ti.reset('time.process_time_ns()'):
with timer:
time.process_time_ns()
for timer in ti.reset('time.time()'):
with timer:
time.time()
for timer in ti.reset('time.time_ns()'):
with timer:
time.time_ns()
for timer in ti.reset('time.perf_counter()'):
with timer:
time.perf_counter()
for timer in ti.reset('time.perf_counter_ns()'):
with timer:
time.perf_counter_ns()
for timer in ti.reset('time.thread_time()'):
with timer:
time.thread_time()
for timer in ti.reset('time.monotonic()'):
with timer:
time.monotonic()
for timer in ti.reset('time.monotonic_ns()'):
with timer:
time.monotonic_ns()
print('ti.rankings = {}'.format(ub.repr2(ti.rankings, nl=2, align=':', precision=8)))
if __name__ == '__main__':
"""
CommandLine:
python ~/code/progiter/dev/benchmarks/bench_check.py
"""
basic_benchmark()
|
[
"erotemic@gmail.com"
] |
erotemic@gmail.com
|
88983ba58579d66596f8efac20102f270c9fba97
|
a97fb0584709e292a475defc8506eeb85bb24339
|
/source code/code-Python 3.0.1/ch405.py
|
6791b224e8992f7ce92ddf7376f1155f90e2870e
|
[] |
no_license
|
AAQ6291/PYCATCH
|
bd297858051042613739819ed70c535901569079
|
27ec4094be785810074be8b16ef84c85048065b5
|
refs/heads/master
| 2020-03-26T13:54:57.051016
| 2018-08-17T09:05:19
| 2018-08-17T09:05:19
| 144,963,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Declare the user and passwd variables to receive the account name and password entered by the user
user = input("login:")
passwd = input("password (empty for guest):")
## Use the string .strip() method to remove whitespace from the user input, since the user may type extra whitespace characters
user = user.strip()
passwd = passwd.strip()
if (user == "" and passwd == "") or (user =="" and passwd !=""):
print("username or password cannot be empty.")
elif user == "admin" and passwd == "!d^*^BM(;.":
print("welcome administrator!")
elif user == "guest" and passwd == "":
print("welcome, you're guest.")
elif user == "huang" and passwd == "12345":
print("hello, huang!")
else:
print("wrong username or password.")
|
[
"angelak.tw@gmail.com"
] |
angelak.tw@gmail.com
|
64b5a847f9463a91bd9efb546c74636a4fb7aec1
|
8c73955b7b3b8e7893e8ff3d78341a99a66f6c12
|
/src/train.py
|
8c7ed1b65cf8e7129a30f83b730aafb482fdd906
|
[] |
no_license
|
akiFQC/shinra-attribute-extraction
|
ba9452d005830b6c24c80d166a8ff3bcf82a70b8
|
633f65ec5b61b8937fdf9cf24fe4ae07960e93fd
|
refs/heads/main
| 2023-07-14T03:22:16.927915
| 2021-08-12T08:34:03
| 2021-08-12T08:34:03
| 396,015,806
| 0
| 0
| null | 2021-08-14T13:22:14
| 2021-08-14T13:22:13
| null |
UTF-8
|
Python
| false
| false
| 5,631
|
py
|
import argparse
import sys
from pathlib import Path
import json
import torch
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
import torch.optim as optim
from transformers import AutoTokenizer, AutoModel
from tqdm import tqdm
from seqeval.metrics import f1_score, classification_report
import mlflow
from sklearn.model_selection import train_test_split
from dataset import ShinraData
from dataset import NerDataset, ner_collate_fn, decode_iob
from model import BertForMultilabelNER, create_pooler_matrix
from predict import predict
device = "cuda:1" if torch.cuda.is_available() else "cpu"
class EarlyStopping():
def __init__(self, patience=0, verbose=0):
self._step = 0
self._score = - float('inf')
self.patience = patience
self.verbose = verbose
def validate(self, score):
if self._score > score:
self._step += 1
if self._step > self.patience:
if self.verbose:
print('early stopping')
return True
else:
self._step = 0
self._score = score
return False
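# Minimal usage sketch of EarlyStopping (illustrative scores only): stop once
# the validation score fails to improve for `patience` consecutive checks.
#
# stopper = EarlyStopping(patience=3, verbose=1)
# for score in [0.10, 0.20, 0.19, 0.18, 0.17, 0.16]:
#     if stopper.validate(score):
#         break  # triggers on the fourth non-improving score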
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument("--input_path", type=str, help="Specify input path in SHINRA2020")
parser.add_argument("--model_path", type=str, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--lr", type=float, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--bsz", type=int, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--epoch", type=int, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--grad_acc", type=int, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--grad_clip", type=float, help="Specify attribute_list path in SHINRA2020")
parser.add_argument("--note", type=str, help="Specify attribute_list path in SHINRA2020")
args = parser.parse_args()
return args
def evaluate(model, dataset, attributes, args):
total_preds, total_trues = predict(model, dataset, device)
total_preds = decode_iob(total_preds, attributes)
total_trues = decode_iob(total_trues, attributes)
f1 = f1_score(total_trues, total_preds)
return f1
def train(model, train_dataset, valid_dataset, attributes, args):
optimizer = optim.AdamW(model.parameters(), lr=args.lr)
# scheduler = get_scheduler(
# args.bsz, args.grad_acc, args.epoch, args.warmup, optimizer, len(train_dataset))
early_stopping = EarlyStopping(patience=10, verbose=1)
losses = []
for e in range(args.epoch):
train_dataloader = DataLoader(train_dataset, batch_size=args.bsz, collate_fn=ner_collate_fn, shuffle=True)
bar = tqdm(total=len(train_dataset))
total_loss = 0
model.train()
for step, inputs in enumerate(train_dataloader):
input_ids = inputs["tokens"]
word_idxs = inputs["word_idxs"]
labels = inputs["labels"]
labels = [pad_sequence([torch.tensor(l) for l in label], padding_value=-1, batch_first=True).to(device)
for label in labels]
input_ids = pad_sequence([torch.tensor(t) for t in input_ids], padding_value=0, batch_first=True).to(device)
attention_mask = input_ids > 0
pooling_matrix = create_pooler_matrix(input_ids, word_idxs, pool_type="head").to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
pooling_matrix=pooling_matrix)
loss = outputs[0]
loss.backward()
total_loss += loss.item()
mlflow.log_metric("Trian batch loss", loss.item(), step=(e+1) * step)
bar.set_description(f"[Epoch] {e + 1}")
bar.set_postfix({"loss": loss.item()})
bar.update(args.bsz)
if (step + 1) % args.grad_acc == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip
)
optimizer.step()
# scheduler.step()
optimizer.zero_grad()
losses.append(total_loss / (step+1))
mlflow.log_metric("Trian loss", losses[-1], step=e)
valid_f1 = evaluate(model, valid_dataset, attributes, args)
mlflow.log_metric("Valid F1", valid_f1, step=e)
if early_stopping._score < valid_f1:
torch.save(model.state_dict(), args.model_path + "best.model")
if e + 1 > 30 and early_stopping.validate(valid_f1):
break
if __name__ == "__main__":
args = parse_arg()
bert = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese")
tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
# dataset = [ShinraData(), ....]
dataset = ShinraData.from_shinra2020_format(Path(args.input_path))
dataset = [d for d in dataset if d.nes is not None]
model = BertForMultilabelNER(bert, len(dataset[0].attributes)).to(device)
train_dataset, valid_dataset = train_test_split(dataset, test_size=0.1)
train_dataset = NerDataset([d for train_d in train_dataset for d in train_d.ner_inputs], tokenizer)
valid_dataset = NerDataset([d for valid_d in valid_dataset for d in valid_d.ner_inputs], tokenizer)
mlflow.start_run()
mlflow.log_params(vars(args))
train(model, train_dataset, valid_dataset, dataset[0].attributes, args)
torch.save(model.state_dict(), args.model_path + "last.model")
mlflow.end_run()
|
[
"suzzz428@gmail.com"
] |
suzzz428@gmail.com
|
93b678058a71e6771be47a04bd1790e2246fbffb
|
bbac91977974702ce52fc0be2e279dc7750f7401
|
/backend/serializers.py
|
435150b1d1ced60909c704c0a6783b467c8af0fc
|
[] |
no_license
|
Occy88/BiddingSystem
|
bbb0cc85a3621622cbbcb1313fbe1f1fc74a8f72
|
a8619bad0efee8d2256ef11f358d99c21e5a67b2
|
refs/heads/master
| 2023-01-13T08:53:16.450312
| 2019-12-19T18:19:59
| 2019-12-19T18:19:59
| 226,520,713
| 0
| 0
| null | 2022-12-13T00:50:21
| 2019-12-07T13:48:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,235
|
py
|
from rest_framework import serializers
from .models import Bid, Session
from django.contrib.auth.models import Group
from django.conf import settings
from pydoc import locate
from guardian.shortcuts import assign_perm, remove_perm
class BidSerializer(serializers.ModelSerializer):
# to work out all the fk relationships be clever about what to show...
# perhaps nothing?
# perhaps Groups?
# shipment_sites = serializers.PrimaryKeyRelatedField(many=True, queryset=ShipmentSite.objects.all())
class Meta:
model = Bid
fields = ('id','user', 'time', 'price', 'quantity')
def create(self, validated_data):
"""
        Create and return a new `Bid` instance, given the validated data.
"""
# validated_data.pop('shipments', None)
bid = Bid.objects.create(**validated_data)
return bid
class SessionSerializer(serializers.ModelSerializer):
# to work out all the fk relationships be clever about what to show...
# perhaps nothing?
# perhaps Groups?
# shipment_sites = serializers.PrimaryKeyRelatedField(many=True, queryset=ShipmentSite.objects.all())
bid_set = BidSerializer(many=True)
class Meta:
model = Session
fields = ('id', 'time_start', 'active', 'bid_set')
def create(self, validated_data):
"""
        Create and return a new `Session` instance, given the validated data.
"""
# validated_data.pop('shipments', None)
bid = Session.objects.create(**validated_data)
return bid
|
[
"octavio.delser@gmail.com"
] |
octavio.delser@gmail.com
|
9ef29961f26a8da8006d6b5e60b61e46c0c62589
|
50500f7e7afc0a401cfa99bdaf438d3db90072f2
|
/biothings_explorer/tests/test_biolink.py
|
4e4ee740fe09c60983633aab11272b78500c4821
|
[
"Apache-2.0"
] |
permissive
|
andrewsu/bte_schema
|
903112b8344512bfe608d77948d864bb134596df
|
b727dfded0d10b32ef215094715171ef94c38e34
|
refs/heads/master
| 2020-07-27T02:59:40.552124
| 2019-11-12T17:48:18
| 2019-11-12T17:48:18
| 208,845,945
| 0
| 0
|
Apache-2.0
| 2019-09-16T16:23:42
| 2019-09-16T16:23:42
| null |
UTF-8
|
Python
| false
| false
| 7,109
|
py
|
import unittest
from biothings_explorer.registry import Registry
from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher
class TestSingleHopQuery(unittest.TestCase):
def setUp(self):
self.reg = Registry()
def test_anatomy2gene(self):
        # test <anatomy, associatedWith, gene>
seqd = SingleEdgeQueryDispatcher(input_cls='AnatomicalEntity',
input_id='bts:uberon',
output_cls='Gene',
output_id='bts:hgnc',
pred='bts:associatedWith',
values='UBERON:0004720',
registry=self.reg)
seqd.query()
self.assertTrue('30881' in seqd.G)
def test_disease2gene(self):
        # test <anatomy, associatedWith, gene>
seqd = SingleEdgeQueryDispatcher(input_cls='AnatomicalEntity',
input_id='bts:uberon',
output_cls='Gene',
output_id='bts:hgnc',
pred='bts:associatedWith',
values='UBERON:0004720',
registry=self.reg)
seqd.query()
self.assertTrue('30881' in seqd.G)
def test_disease2pathway(self):
seqd = SingleEdgeQueryDispatcher(input_cls='DiseaseOrPhenotypicFeature',
input_id='bts:mondo',
output_cls='Pathway',
output_id='bts:reactome',
pred='bts:associatedWith',
values='MONDO:0018492',
registry=self.reg)
seqd.query()
self.assertTrue('R-HSA-110330' in seqd.G)
def test_disease2phenotype(self):
seqd = SingleEdgeQueryDispatcher(input_cls='DiseaseOrPhenotypicFeature',
input_id='bts:mondo',
output_cls='PhenotypicFeature',
output_id='bts:mondo',
pred='bts:associatedWith',
values='MONDO:0010997',
registry=self.reg)
seqd.query()
self.assertTrue('HP:0002063' in seqd.G)
def test_gene2anatomy(self):
seqd = SingleEdgeQueryDispatcher(input_cls='Gene',
input_id='bts:entrez',
output_cls='AnatomicalEntity',
output_id='bts:uberon',
pred='bts:associatedWith',
values='13434',
registry=self.reg)
seqd.query()
self.assertTrue('UBERON:0000988' in seqd.G)
def test_gene2phenotype(self):
seqd = SingleEdgeQueryDispatcher(input_cls='Gene',
input_id='bts:entrez',
output_cls='PhenotypicFeature',
output_id='bts:hp',
pred='bts:associatedWith',
values='13434',
registry=self.reg)
seqd.query()
self.assertTrue('HP:0040218' in seqd.G)
def test_geneinteraction(self):
seqd = SingleEdgeQueryDispatcher(input_cls='Gene',
input_id='bts:entrez',
output_cls='Gene',
output_id='bts:hp',
pred='bts:molecularlyInteractsWith',
values='1017',
registry=self.reg)
seqd.query()
self.assertTrue('27230' in seqd.G)
def test_pathway2disease(self):
        # test <pathway, associatedWith, disease>
seqd = SingleEdgeQueryDispatcher(input_cls='Pathway',
input_id='bts:reactome',
output_cls='DiseaseOrPhenotypicFeature',
output_id='bts:mondo',
pred='bts:associatedWith',
values='R-HSA-210745',
registry=self.reg)
seqd.query()
self.assertTrue('MONDO:0017885' in seqd.G)
def test_pathway2phenotype(self):
seqd = SingleEdgeQueryDispatcher(input_cls='Pathway',
input_id='bts:reactome',
output_cls='PhenotypicFeature',
output_id='bts:hp',
pred='bts:associatedWith',
values='R-HSA-210745',
registry=self.reg)
seqd.query()
self.assertTrue('HP:0004904' in seqd.G)
def test_phenotype2disease(self):
seqd = SingleEdgeQueryDispatcher(input_cls='PhenotypicFeature',
input_id='bts:hp',
output_cls='DiseaseOrPhenotypicFeature',
output_id='bts:mondo',
pred='bts:associatedWith',
values='HP:0004904',
registry=self.reg)
seqd.query()
self.assertTrue('MONDO:0010894' in seqd.G)
def test_phenotype2gene(self):
seqd = SingleEdgeQueryDispatcher(input_cls='PhenotypicFeature',
input_id='bts:hp',
output_cls='Gene',
output_id='bts:hgnc',
pred='bts:associatedWith',
values='HP:0004904',
registry=self.reg)
seqd.query()
self.assertTrue('4195' in seqd.G)
def test_phenotype2pathway(self):
seqd = SingleEdgeQueryDispatcher(input_cls='PhenotypicFeature',
input_id='bts:hp',
output_cls='Pathway',
output_id='bts:reactome',
pred='bts:associatedWith',
values='HP:0004904',
registry=self.reg)
seqd.query()
self.assertTrue('R-HSA-210745' in seqd.G)
|
[
"kevinxin@scripps.edu"
] |
kevinxin@scripps.edu
|
3ffaa9adfc101e07ad05a1009bd90c9f436236e6
|
8ecf4930f9aa90c35e5199d117068b64a8d779dd
|
/TopQuarkAnalysis/SingleTop/test/Mu_2011A_08Nov_part_13_cfg.py
|
c75a13863b8955018a45775a881133abc70b43f0
|
[] |
no_license
|
fabozzi/ST_44
|
178bd0829b1aff9d299528ba8e85dc7b7e8dd216
|
0becb8866a7c758d515e70ba0b90c99f6556fef3
|
refs/heads/master
| 2021-01-20T23:27:07.398661
| 2014-04-14T15:12:32
| 2014-04-14T15:12:32
| 18,765,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,786
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("SingleTopSystematics")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
FailPath = cms.untracked.vstring('ProductNotFound','Type Mismatch')
)
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff") ### real data
process.GlobalTag.globaltag = cms.string("START44_V13::All")
#Load B-Tag
#MC measurements from 36X
#process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDBMC36X")
#process.load ("RecoBTag.PerformanceDB.BTagPerformanceDBMC36X")
##Measurements from Fall10
#process.load ("RecoBTag.PerformanceDB.BTagPerformanceDB1011")
#process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDB1011")
#Spring11
process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDB1107")
process.load ("RecoBTag.PerformanceDB.BTagPerformanceDB1107")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20000))
process.source = cms.Source ("PoolSource",
fileNames = cms.untracked.vstring (
'file:/tmp/mmerola/DataMerged.root',
#'rfio:/castor/cern.ch/user/m/mmerola/SingleTop_2012/MergedJune/DataMerged.root',
),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
#eventsToProcess = cms.untracked.VEventRange('1:19517967-1:19517969'),
)
#from Data import *
#process.source.fileNames = Data_ntuple
#process.source.fileNames = cms.untracked.vstring("file:/tmp/mmerola/DataMerged.root")
#PileUpSync
#Output
#process.TFileService = cms.Service("TFileService", fileName = cms.string("/castor/cern.ch/user/m/mmerola/SingleTop_2012/TreesJune/Mu_2011A_08Nov_part_13.root"))
process.TFileService = cms.Service("TFileService", fileName = cms.string("/tmp/mmerola/Mu_2011A_08Nov_part_13.root"))
#process.TFileService = cms.Service("TFileService", fileName = cms.string("testNoPU.root"))
#process.load("SingleTopAnalyzers_cfi")
process.load("SingleTopRootPlizer_cfi")
process.load("SingleTopFilters_cfi")
#from SingleTopPSets_cfi import *
#from SingleTopPSetsFall11_cfi import *
from SingleTopPSetsFall_cfi import *
process.TreesEle.dataPUFile = cms.untracked.string("pileUpDistr.root")
process.TreesMu.dataPUFile = cms.untracked.string("pileUpDistr.root")
#process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.channelInfo = DataEle
process.TreesMu.channelInfo = DataMu
#process.PlotsEle.channelInfo = DataEle
#process.PlotsMu.channelInfo = DataMu
#process.TreesMu.systematics = cms.untracked.vstring();
#doPU = cms.untracked.bool(False)
#process.WeightProducer.doPU = cms.untracked.bool(False)
#process.TreesMu.doQCD = cms.untracked.bool(False)
#process.TreesEle.doQCD = cms.untracked.bool(False)
#process.TreesMu.doResol = cms.untracked.bool(False)
#process.TreesEle.doResol = cms.untracked.bool(False)
#process.TreesMu.doPU = cms.untracked.bool(False)
#process.TreesEle.doPU = cms.untracked.bool(False)
channel_instruction = "mu" #SWITCH_INSTRUCTION
#channel_instruction = "allmc" #SWITCH_INSTRUCTION
MC_instruction = False #TRIGGER_INSTRUCTION
process.HLTFilterMu.isMC = MC_instruction
process.HLTFilterEle.isMC = MC_instruction
process.HLTFilterMuOrEle.isMC = MC_instruction
process.HLTFilterMuOrEleMC.isMC = MC_instruction
#process.PUWeightsPath = cms.Path(
# process.WeightProducer
#)
if channel_instruction == "allmc":
# process.TreesMu.doResol = cms.untracked.bool(True)
# process.TreesEle.doResol = cms.untracked.bool(True)
# process.TreesEle.doTurnOn = cms.untracked.bool(True)
process.PathSysMu = cms.Path(
process.HLTFilterMuMC *
process.TreesMu
)
process.PathSysEle = cms.Path(
process.HLTFilterEleMC *
process.TreesEle
)
if channel_instruction == "all":
process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.doPU = cms.untracked.bool(False)
process.TreesMu.doPU = cms.untracked.bool(False)
process.PathSys = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterMuOrEle *
process.TreesMu +
process.TreesEle
)
if channel_instruction == "mu":
process.TreesMu.doPU = cms.untracked.bool(False)
process.TreesMu.doResol = cms.untracked.bool(False)
process.PathSysMu = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
# process.HLTFilterMu *
process.HLTFilterMuData *
process.TreesMu
)
if channel_instruction == "ele":
process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.doPU = cms.untracked.bool(False)
process.TreesEle.doResol = cms.untracked.bool(False)
process.PathSysMu = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterEle *
process.TreesEle
)
if channel_instruction == "muqcd":
process.TreesMu.doPU = cms.untracked.bool(False)
process.TreesMu.doResol = cms.untracked.bool(False)
process.PathSysMu = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterMuQCD *
process.TreesMu
)
if channel_instruction == "eleqcd":
process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.doPU = cms.untracked.bool(False)
process.TreesEle.doResol = cms.untracked.bool(False)
process.TreesEle.isControlSample = cms.untracked.bool(True)
process.PathSysEle = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterEleQCD *
process.TreesEle
)
process.source.fileNames = cms.untracked.vstring('file:/tmp/mmerola/Mu_2011A_08Nov_part_13Merged.root',)
|
[
"Francesco.Fabozzi@cern.ch"
] |
Francesco.Fabozzi@cern.ch
|
0c1eaf6ef7a2bb9f5dddea73b74a3522c161aa8e
|
31a0b0749c30ff37c3a72592387f9d8195de4bd6
|
/release/ray_release/cluster_manager/full.py
|
766362e1e86753db1292ea1840b740d60951cc41
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
longshotsyndicate/ray
|
15100bad514b602a3fa39bfe205288e7bec75d90
|
3341fae573868338b665bcea8a1c4ee86b702751
|
refs/heads/master
| 2023-01-28T15:16:00.401509
| 2022-02-18T05:35:47
| 2022-02-18T05:35:47
| 163,961,795
| 1
| 1
|
Apache-2.0
| 2023-01-14T08:01:02
| 2019-01-03T11:03:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,391
|
py
|
import time
from ray_release.exception import (
ClusterCreationError,
ClusterStartupError,
ClusterStartupTimeout,
ClusterStartupFailed,
)
from ray_release.logger import logger
from ray_release.cluster_manager.minimal import MinimalClusterManager
from ray_release.util import format_link, anyscale_cluster_url
REPORT_S = 30.0
class FullClusterManager(MinimalClusterManager):
"""Full manager.
Builds app config and compute template and starts/terminated session
using SDK.
"""
def start_cluster(self, timeout: float = 600.0):
logger.info(f"Creating cluster {self.cluster_name}")
try:
result = self.sdk.create_cluster(
dict(
name=self.cluster_name,
project_id=self.project_id,
cluster_environment_build_id=self.cluster_env_build_id,
cluster_compute_id=self.cluster_compute_id,
idle_timeout_minutes=self.autosuspend_minutes,
)
)
self.cluster_id = result.result.id
except Exception as e:
raise ClusterCreationError(f"Error creating cluster: {e}") from e
# Trigger session start
logger.info(f"Starting cluster {self.cluster_name} ({self.cluster_id})")
cluster_url = anyscale_cluster_url(
project_id=self.project_id, session_id=self.cluster_id
)
logger.info(f"Link to cluster: {format_link(cluster_url)}")
try:
result = self.sdk.start_cluster(self.cluster_id, start_cluster_options={})
cop_id = result.result.id
completed = result.result.completed
except Exception as e:
raise ClusterStartupError(
f"Error starting cluster with name "
f"{self.cluster_name} and {self.cluster_id} ({cluster_url}): "
f"{e}"
) from e
# Wait for session
logger.info(f"Waiting for cluster {self.cluster_name}...")
start_time = time.monotonic()
timeout_at = start_time + timeout
        next_status = start_time + REPORT_S
while not completed:
now = time.monotonic()
if now >= timeout_at:
raise ClusterStartupTimeout(
f"Time out when creating cluster {self.cluster_name}"
)
if now >= next_status:
logger.info(
f"... still waiting for cluster {self.cluster_name} "
f"({int(now - start_time)} seconds) ..."
)
                next_status += REPORT_S
# Sleep 1 sec before next check.
time.sleep(1)
result = self.sdk.get_cluster_operation(cop_id, _request_timeout=30)
completed = result.result.completed
result = self.sdk.get_cluster(self.cluster_id)
if result.result.state != "Running":
raise ClusterStartupFailed(
f"Cluster did not come up - most likely the nodes are currently "
f"not available. Please check the cluster startup logs: "
f"{cluster_url} (cluster state: {result.result.state})"
)
def terminate_cluster(self, wait: bool = False):
if self.cluster_id:
# Just trigger a request. No need to wait until session shutdown.
result = self.sdk.terminate_cluster(
cluster_id=self.cluster_id, terminate_cluster_options={}
)
if not wait:
return
# Only do this when waiting
cop_id = result.result.id
completed = result.result.completed
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
cluster_operation_response = self.sdk.get_cluster_operation(
cop_id, _request_timeout=30
)
cluster_operation = cluster_operation_response.result
completed = cluster_operation.completed
result = self.sdk.get_cluster(self.cluster_id)
while result.result.state != "Terminated":
time.sleep(1)
result = self.sdk.get_cluster(self.cluster_id)
def get_cluster_address(self) -> str:
return f"anyscale://{self.project_name}/{self.cluster_name}"
|
[
"noreply@github.com"
] |
longshotsyndicate.noreply@github.com
|
17a8ecc861f7d8cd4aca3b5ccae2459c6969877e
|
e6d4a87dcf98e93bab92faa03f1b16253b728ac9
|
/algorithms/python/reverseString/reverseString.py
|
8d30b5defc2ca207f40965dab0c6cfe7eaa1a921
|
[] |
no_license
|
MichelleZ/leetcode
|
b5a58e1822e3f6ef8021b29d9bc9aca3fd3d416f
|
a390adeeb71e997b3c1a56c479825d4adda07ef9
|
refs/heads/main
| 2023-03-06T08:16:54.891699
| 2023-02-26T07:17:47
| 2023-02-26T07:17:47
| 326,904,500
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Source: https://leetcode.com/problems/reverse-string/
# Author: Miao Zhang
# Date: 2021-02-03
from typing import List
class Solution:
    def reverseString(self, s: List[str]) -> None:
        """
        Do not return anything, modify s in-place instead.
        """
        s.reverse()
|
[
"zhangdaxiaomiao@163.com"
] |
zhangdaxiaomiao@163.com
|
5d6169c3b0d31fc740377c9c1c9ff5e09fb5a40d
|
71bc873c20fbc45bb5e13095d2474496818a23f9
|
/service_coldstart/code/redundant_code/send_email_withAttachment.py
|
6027018a46aac7ab761d72c23456d9367df891aa
|
[] |
no_license
|
2877992943/lianyun
|
f31c44ea2e266bae51cae4fa464d1bae368c8d3f
|
a872d6cd1b2eff402bcccb326d33d086816d87af
|
refs/heads/master
| 2021-01-20T16:17:20.226401
| 2017-05-10T06:49:31
| 2017-05-10T06:49:31
| 90,830,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,904
|
py
|
#!/usr/bin/env python
#coding: utf-8
import pandas as pd
"""
http://www.cnblogs.com/leetao94/p/5460520.html
"""
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import email.MIMEMultipart
import email.MIMEText
import email.MIMEBase
import os.path
def SendEmail(fromAdd, toAdd, subject, attachfile, htmlText):
strFrom = fromAdd;
strTo = toAdd;
msg =MIMEText(htmlText);
msg['Content-Type'] = 'Text/HTML';
msg['Subject'] = Header(subject,'gb2312');
msg['To'] = strTo;
msg['From'] = strFrom;
smtp = smtplib.SMTP('smtp.exmail.qq.com');
smtp.login('yangrui@yunkecn.com','yr13371695096YR');
try:
smtp.sendmail(strFrom,strTo,msg.as_string());
finally:
        smtp.close()
def send_with_attachment(From,To,filename,num_leads,csvName):
email_csv_name='temp_'+csvName+'.csv';#print '1',From,To,filename,num_leads,csvName
email_csv_name=email_csv_name.decode('utf-8')
if To.find('@yunkecn.com')==-1:
To='yangrui@yunkecn.com'
server = smtplib.SMTP('smtp.exmail.qq.com')
server.login('yangrui@yunkecn.com','yr13371695096YR')
# 构造MIMEMultipart对象做为根容器
main_msg = email.MIMEMultipart.MIMEMultipart()
# 构造MIMEText对象做为邮件显示内容并附加到根容器
text_msg = email.MIMEText.MIMEText("find the attachment please.")
main_msg.attach(text_msg)
# 构造MIMEBase对象做为文件附件内容并附加到根容器
contype = 'application/octet-stream'
maintype, subtype = contype.split('/', 1)
## 读入文件内容并格式化
## select num_leads
df=pd.read_csv(filename,encoding='utf-8');#print df.shape
if num_leads<df.shape[0]:
df=df[:num_leads];#print df.shape[0]
df.to_csv(filename,index=False,encoding='utf-8')
### if required num leads > df.shape[0],use df directly
data = open(filename, 'rb')
file_msg = email.MIMEBase.MIMEBase(maintype, subtype)
file_msg.set_payload(data.read( ))
data.close( )
email.Encoders.encode_base64(file_msg)
## 设置附件头
basename = os.path.basename(filename);print basename,filename,email_csv_name
file_msg.add_header('Content-Disposition',
'attachment', filename = filename)
main_msg.attach(file_msg)
# 设置根容器属性
main_msg['From'] = From
main_msg['To'] = To
main_msg['Subject'] = "attachment :%s email to user:%s"%(csvName,To)
main_msg['Date'] = email.Utils.formatdate( )
# 得到格式化后的完整文本
fullText = main_msg.as_string( )
# 用smtp发送邮件
try:
server.sendmail(From, To, fullText)
finally:
server.quit()
if __name__ == "__main__":
send_with_attachment('yangrui@yunkecn.com','yangrui@yunkecn.com','tmp.csv',5,'宁波_1999')# '宁波_1999' in csvName as csvname will not work
|
[
"2877992943@qq.com"
] |
2877992943@qq.com
|
f326d396098af535a466262caabde1b751d25673
|
e8fef7552fb0c354d1084d2b4e7bf16efb7b9021
|
/tests/__init__.py
|
de4f4ac5908f313553e18bad2d117aacd1fda75c
|
[
"MIT"
] |
permissive
|
TristanTTran/feets
|
180ea67030c88b9eebb77923fe1af6746f654b30
|
48b16a5f2b95c0a4c05b47a88b396250faf168d6
|
refs/heads/master
| 2023-03-28T01:07:48.636265
| 2021-03-26T01:28:36
| 2021-03-26T01:28:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,350
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
"""All feets tests"""
|
[
"jbc.develop@gmail.com"
] |
jbc.develop@gmail.com
|
e21891fa297ee7c4ca8a4e8d84c6160a9d883849
|
84c5ce2f75ec8d4d9704dc993682ba52745a3e12
|
/m_layer/m_layer.py
|
d1b535b8a4da916f3951a00e1000943a97f0ea0c
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
yang-song/google-research
|
d0610748ae8056cfa2f89345053b386e976a1b82
|
34119b64214af089db8c675e6a03b30a0f608f66
|
refs/heads/master
| 2022-12-16T06:05:40.561237
| 2020-09-17T20:39:13
| 2020-09-17T20:42:11
| 296,450,555
| 1
| 0
|
Apache-2.0
| 2020-09-17T21:55:10
| 2020-09-17T21:55:09
| null |
UTF-8
|
Python
| false
| false
| 3,595
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# coding=utf-8
"""Code for creating the M-layer as a keras layer."""
import tensorflow as tf
class MLayer(tf.keras.layers.Layer):
"""The M-layer: Lie Algebra generator-embedding and matrix exponentiation.
This is a Keras implementation of the M-layer described in (2020)[1].
#### References
[1]: Thomas Fischbacher, Iulia M. Comsa, Krzysztof Potempa, Moritz Firsching,
Luca Versari, Jyrki Alakuijala "Intelligent Matrix Exponentiation", ICML 2020.
TODO(firsching): add link to paper.
"""
def __init__(self,
dim_m,
matrix_init=None,
with_bias=False,
matrix_squarings_exp=None,
**kwargs):
"""Initializes the instance.
Args:
dim_m: The matrix to be exponentiated in the M-layer has the shape (dim_m,
dim_m).
matrix_init: What initializer to use for the matrix. `None` defaults to
`normal` initalization.
with_bias: Whether a bias should be included in layer after
exponentiation.
matrix_squarings_exp: None to compute tf.linalg.expm(M), an integer `k` to
instead approximate it with (I+M/2**k)**(2**k).
**kwargs: keyword arguments passed to the Keras layer base class.
"""
self._dim_m = dim_m
self._rep_to_exp_tensor = None
self._matrix_init = matrix_init or 'normal'
self._with_bias = with_bias
self._matrix_bias = None
self._matrix_squarings_exp = matrix_squarings_exp
super(MLayer, self).__init__(**kwargs)
def build(self, input_shape):
dim_rep = input_shape[-1]
self._rep_to_exp_tensor = self.add_weight(
name='rep_to_exp_tensor',
shape=(dim_rep, self._dim_m, self._dim_m),
initializer=self._matrix_init,
trainable=True)
if self._with_bias:
self._matrix_bias = self.add_weight(
name='matrix_bias',
shape=(1, self._dim_m, self._dim_m),
initializer='uniform',
trainable=True)
super(MLayer, self).build(input_shape)
def call(self, x):
if not self._with_bias:
mat = tf.einsum('amn,...a->...mn', self._rep_to_exp_tensor, x)
else:
mat = tf.einsum('amn,...a->...mn', self._rep_to_exp_tensor,
x) + self._matrix_bias
if self._matrix_squarings_exp is None:
return tf.linalg.expm(mat)
# Approximation of exp(mat) as (1+mat/k)**k with k = 2**MATRIX_SQUARINGS_EXP
mat = mat * 0.5**self._matrix_squarings_exp + tf.eye(self._dim_m)
    for _ in range(self._matrix_squarings_exp):
mat = tf.einsum('...ij,...jk->...ik', mat, mat)
return mat
def compute_output_shape(self, input_shape):
return input_shape[0], self._dim_m, self._dim_m
def get_config(self):
config = dict(super().get_config())
config['dim_m'] = self._dim_m
config['matrix_init'] = self._matrix_init
config['with_bias'] = self._with_bias
config['matrix_squarings_exp'] = self._matrix_squarings_exp
return config
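# Minimal smoke-test sketch; the sizes below are arbitrary illustrative
# assumptions, not values from the paper.
if __name__ == '__main__':
  layer = MLayer(dim_m=5)
  x = tf.random.normal((32, 8))  # batch of 32 eight-dimensional representations
  y = layer(x)                   # matrix-exponentiated output, shape (32, 5, 5)
  print(y.shape)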
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
5311c7de6d4c19bc5c23b7024f94490d6e4da152
|
648f742d6db2ea4e97b83c99b6fc49abd59e9667
|
/common/vault/oas/models/global_parameters_create_global_parameter_request.py
|
13908154762177b2ca58227508b96bd210f59d49
|
[] |
no_license
|
jmiller-tm/replit
|
c56ce63718f6eb2d9b53bd09d3f7b3ef3496cb86
|
c8e6af3268c4ef8da66516154850919ea79055dc
|
refs/heads/main
| 2023-08-30T00:49:35.738089
| 2021-11-16T23:09:08
| 2021-11-16T23:09:08
| 428,809,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,259
|
py
|
# coding: utf-8
"""
vault/kernel/core_api/proto/v1/accounts/core_api_account_schedule_tags.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GlobalParametersCreateGlobalParameterRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'global_parameter': 'GlobalParametersGlobalParameter',
'initial_value': 'str'
}
attribute_map = {
'request_id': 'request_id',
'global_parameter': 'global_parameter',
'initial_value': 'initial_value'
}
def __init__(self, request_id=None, global_parameter=None, initial_value=None): # noqa: E501
"""GlobalParametersCreateGlobalParameterRequest - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._global_parameter = None
self._initial_value = None
self.discriminator = None
self.request_id = request_id
self.global_parameter = global_parameter
self.initial_value = initial_value
@property
def request_id(self):
"""Gets the request_id of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
A unique string ID used for idempotency. Required. # noqa: E501
:return: The request_id of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this GlobalParametersCreateGlobalParameterRequest.
A unique string ID used for idempotency. Required. # noqa: E501
:param request_id: The request_id of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:type: str
"""
if request_id is None:
raise ValueError("Invalid value for `request_id`, must not be `None`") # noqa: E501
self._request_id = request_id
@property
def global_parameter(self):
"""Gets the global_parameter of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:return: The global_parameter of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:rtype: GlobalParametersGlobalParameter
"""
return self._global_parameter
@global_parameter.setter
def global_parameter(self, global_parameter):
"""Sets the global_parameter of this GlobalParametersCreateGlobalParameterRequest.
:param global_parameter: The global_parameter of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:type: GlobalParametersGlobalParameter
"""
if global_parameter is None:
raise ValueError("Invalid value for `global_parameter`, must not be `None`") # noqa: E501
self._global_parameter = global_parameter
@property
def initial_value(self):
"""Gets the initial_value of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
This will be used to create a `GlobalParameterValue` associated with the newly created `GlobalParameter`. The `effective_timestamp` of the created `GlobalParameterValue` will be the Unix epoch. Required. # noqa: E501
:return: The initial_value of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:rtype: str
"""
return self._initial_value
@initial_value.setter
def initial_value(self, initial_value):
"""Sets the initial_value of this GlobalParametersCreateGlobalParameterRequest.
This will be used to create a `GlobalParameterValue` associated with the newly created `GlobalParameter`. The `effective_timestamp` of the created `GlobalParameterValue` will be the Unix epoch. Required. # noqa: E501
:param initial_value: The initial_value of this GlobalParametersCreateGlobalParameterRequest. # noqa: E501
:type: str
"""
if initial_value is None:
raise ValueError("Invalid value for `initial_value`, must not be `None`") # noqa: E501
self._initial_value = initial_value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GlobalParametersCreateGlobalParameterRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GlobalParametersCreateGlobalParameterRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"jmiller@jmiller-tm00769-mbp.nomad.thomac.net"
] |
jmiller@jmiller-tm00769-mbp.nomad.thomac.net
|
200c75fdff4fa1317c02a08f43dc506d3c02c506
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/330/usersdata/302/93449/submittedfiles/lista1.py
|
3c901a262a68d819c6cfc03bf35fedd1961bb928
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
# -*- coding: utf-8 -*-
n = int(input('DIgite a quantidade de valores da matriz: '))
a = []
for i in range(0,n,1):
a.append(float(input('Digite a%d:' %(i+1))))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
12f159b5cb51056d3aa2a0c1960d1e41b4d15d73
|
321b4ed83b6874eeb512027eaa0b17b0daf3c289
|
/73/73.set-matrix-zeroes.661615342.Accepted.leetcode.python3.py
|
e41b638e253235c385e74103532f377b5bc54df6
|
[] |
no_license
|
huangyingw/submissions
|
7a610613bdb03f1223cdec5f6ccc4391149ca618
|
bfac1238ecef8b03e54842b852f6fec111abedfa
|
refs/heads/master
| 2023-07-25T09:56:46.814504
| 2023-07-16T07:38:36
| 2023-07-16T07:38:36
| 143,352,065
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
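# LeetCode 73: zero out every row and column containing a 0. Index sets give
# O(h + w) extra space (the O(1)-space follow-up would reuse the first row/column).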
class Solution:
def setZeroes(self, matrix):
if not matrix or not matrix[0]:
return
h = len(matrix)
w = len(matrix[0])
rows_to_remove = set()
cols_to_remove = set()
for i in range(h):
if i not in rows_to_remove:
for j in range(w):
if matrix[i][j] == 0:
rows_to_remove.add(i)
cols_to_remove.add(j)
for i in rows_to_remove:
for j in range(w):
matrix[i][j] = 0
for j in cols_to_remove:
for i in range(h):
matrix[i][j] = 0
|
[
"huangyingw@gmail.com"
] |
huangyingw@gmail.com
|
27b7bea7760d2aae277db6002c3ca2da3c455373
|
b8842700c2e071408bfab20bd499c6a4a47d0ccc
|
/week8/longest_subseq.py
|
235e90d05bcab9175a7c4d4306fc0cd8ada18942
|
[] |
no_license
|
kobso1245/Algorithms
|
c74439b662d32b116b8ea3c7e6958b77d1a85bf5
|
1566a3b5e636f6a83f486382f17bff640da923a4
|
refs/heads/master
| 2016-08-10T10:11:14.314792
| 2015-10-05T21:36:45
| 2015-10-05T21:36:45
| 36,860,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
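# Longest increasing subsequence by dynamic programming, scanning right to
# left: table[i] stores (length, string of elements) for the best increasing
# subsequence starting at index i; the final answer is the longest entry.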
def longest_subs(elems):
table = [(0, "") for x in range(len(elems))]
max_elem = elems[0]
curr_max_elem = 0
max_length = 0
max_pos = len(elems)
for i in range(len(elems) - 1, -1, -1):
curr_max_elem = 0
curr_max_index = i
for j in range(i + 1, len(elems)):
if elems[i] < elems[j]:
if curr_max_elem < table[j][0]:
curr_max_elem = table[j][0]
curr_max_index = j
if curr_max_index == i:
table[i] = (1, str(elems[i]))
else:
table[i] = (table[curr_max_index][0] + 1,
str(elems[i]) + table[curr_max_index][1])
if table[i][0] > max_length:
max_pos = i
max_length = table[i][0]
print(table[max_pos][0])
print(table[max_pos][1])
longest_subs([6, 1, 5, 3, 1, 7, 2, 5, 7, 4])
|
[
"kalo.evt@mail.bg"
] |
kalo.evt@mail.bg
|
b43371ed5b350666c780f23524655c92cfef2f34
|
7c7236aa95ebebe241f04b98d55f3033b19dadc2
|
/cms/venv/Scripts/pip3.6-script.py
|
c9130b7fc695e3c6029ee434982930129888e925
|
[] |
no_license
|
taisuo/cms
|
57d792bb47d85bf6a4a39558a1bc34457a355c26
|
dd8baa834d6426a2ce7406ea0b74eab252ef7789
|
refs/heads/master
| 2020-08-03T13:49:44.754754
| 2019-10-08T04:00:21
| 2019-10-08T04:00:21
| 211,765,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
#!G:\pythonwork\server\cms\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
|
[
"3148378928@qq.com"
] |
3148378928@qq.com
|
8cab479aead0b05b07915bd2b9ec453d151bf9d4
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_capsules.py
|
32a574bf1d186e135bc230087ec65675e0aa1fde
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
# class header
class _CAPSULES():
    def __init__(self,):
        self.name = "CAPSULES"
        self.definitions = ['capsule']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['capsule']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
41548045a42748d17ade7d29eac099167198820d
|
dd32c5788caab10641c055b89c8c08f7b8a05361
|
/pippin/config.py
|
427652c0d1c6b105281b48ba0a698b382be69a79
|
[
"MIT"
] |
permissive
|
jcraig5/Pippin
|
bae71ab24e819f02ca732d18560cfe3543b18050
|
85db01173dd4a4f12af39cd3a2bad3a713e8767d
|
refs/heads/master
| 2020-05-31T09:55:08.416338
| 2019-06-13T16:45:39
| 2019-06-13T16:45:39
| 190,225,226
| 0
| 0
|
MIT
| 2019-06-13T16:45:40
| 2019-06-04T15:09:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,032
|
py
|
import configparser
import inspect
import os
import logging
import hashlib
import shutil
import stat
def singleton(fn):
instance = None
def get(*args, **kwargs):
nonlocal instance
if instance is None:
instance = fn(*args, **kwargs)
return instance
return get
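# Example: thanks to @singleton, every call to get_config() below returns the
# same ConfigParser instance -- cfg.ini is read exactly once per process.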
@singleton
def get_config():
filename = os.path.abspath(os.path.dirname(inspect.stack()[0][1]) + "/../cfg.ini")
config = configparser.ConfigParser()
config.read(filename)
return config
def get_output_dir():
output_dir = get_config()['OUTPUT']['output_dir']
if output_dir.startswith("$"):
output_dir = os.path.expandvars(output_dir)
elif not output_dir.startswith("/"):
output_dir = os.path.abspath(os.path.dirname(inspect.stack()[0][1]) + "/../" + output_dir)
return output_dir
def get_output_loc(path):
if "$" in path:
path = os.path.expandvars(path)
if path.startswith("/"):
return path
else:
return os.path.join(get_output_dir(), path)
def get_hash(input_string):
return hashlib.sha256(input_string.encode('utf-8')).hexdigest()
@singleton
def get_logger():
return logging.getLogger("pippin")
def mkdirs(path):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True, mode=0o775)
chown_dir(path)
def copytree(src, dst, symlinks=False, ignore=None):
lst = os.listdir(src)
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass # lchmod not available
elif os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def chown_dir(directory):
global_config = get_config()
logger = get_logger()
try:
shutil.chown(directory, group=global_config["SNANA"]["group"])
except Exception as e:
logger.debug(str(e))
return
for root, dirs, files in os.walk(directory):
for d in dirs:
try:
shutil.chown(os.path.join(root, d), group=global_config["SNANA"]["group"])
except Exception:
logger.debug(f"Chown error: {os.path.join(root, d)}")
for f in files:
try:
shutil.chown(os.path.join(root, f), group=global_config["SNANA"]["group"])
except Exception:
logger.debug(f"Chown error: {os.path.join(root, f)}")
if __name__ == "__main__":
c = get_config()
print(c.sections())
print(c.get("SNANA", "sim_dir"))
print(c["OUTPUT"].getint("ping_frequency"))
|
[
"samuelreay@gmail.com"
] |
samuelreay@gmail.com
|
0b227badbe12d72eda1d08bf7b85c82dca9a0cc6
|
008c5aa9d132fa2549e089ae8df2ef1ce15ad020
|
/response_timeout/middleware.py
|
dd8597205cd6de79d9e8ffdb02f0c87431542156
|
[
"MIT"
] |
permissive
|
movermeyer/django-response-timeout
|
0c0cedc5c838011d314c57e4ab42252639b350f7
|
38f7462ab71d967749efc3be914e2a7a2df80f33
|
refs/heads/master
| 2021-01-25T14:33:17.576026
| 2013-07-20T13:56:44
| 2013-07-20T13:56:44
| 123,708,880
| 0
| 0
|
MIT
| 2018-03-03T16:17:38
| 2018-03-03T16:17:38
| null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
from django.conf import settings
from django.utils.cache import patch_response_headers
class SetCacheTimeoutMiddleware(object):
"""
Request-phase middleware that sets the timeout of each response based on
the RESPONSE_CACHE_SECONDS
If using with UpdateCacheMiddleware, must be placed after so that it sets
the timeout before the cache is updated with the response.
"""
def process_response(self, request, response):
timeout = settings.RESPONSE_CACHE_SECONDS
patch_response_headers(response, timeout)
return response
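# Sketch of the intended settings wiring (this module's import path follows
# the file layout; the surrounding entries are standard Django middleware and
# the ordering shown is illustrative):
#
# MIDDLEWARE_CLASSES = (
#     'django.middleware.cache.UpdateCacheMiddleware',
#     'response_timeout.middleware.SetCacheTimeoutMiddleware',  # after Update*
#     # ... other middleware ...
#     'django.middleware.cache.FetchFromCacheMiddleware',
# )
# RESPONSE_CACHE_SECONDS = 600  # e.g. cache responses for ten minutes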
|
[
"s.shanabrook@gmail.com"
] |
s.shanabrook@gmail.com
|
4cdc462c66dbf52bd65ffa90d47ca5bfebbb3f68
|
6e3e1834eaad3a0c97bf645238e59a0599e047b4
|
/blog/urls/entries.py
|
b1a1b1567818f862b31ae4101bac790aab9a1abd
|
[
"JSON"
] |
permissive
|
davogler/davsite
|
2dc42bfebb476d94f92520e8829999859deae80b
|
edd8ceed560690fa2c3eefde236416ffba559a2e
|
refs/heads/master
| 2021-01-19T06:31:20.655909
| 2014-01-03T19:04:13
| 2014-01-03T19:04:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
from django.conf.urls.defaults import *
from django.views.generic.dates import YearArchiveView, MonthArchiveView, DayArchiveView, DateDetailView
from blog.models import Entry
entry_info_dict = {'queryset':Entry.live.all(), 'date_field': 'pub_date', }
urlpatterns = patterns('',
# Pagination for the equivalent of archive_index generic view.
# The url is of the form http://host/page/4/
# In urls.py for example, ('^blog/page/(?P<page>\d)/$', get_archive_index),
url(r'^$', 'blog.views.get_archive_index_first', ),
url(r'^page/(?P<page>\d)/$', 'blog.views.get_archive_index', ),
#(r'^$', 'django.views.generic.date_based.archive_index', entry_info_dict, 'blog_entry_archive_index'),
#(r'^(?P<year>\d{4})/$', YearArchiveView.as_view(), entry_info_dict, 'blog_entry_archive_year'),
url(r'^(?P<year>\d{4})/$', YearArchiveView.as_view(**entry_info_dict), name= 'blog_entry_archive_year'),
url(r'^(?P<year>\d{4})/(?P<month>\w{3})/$', MonthArchiveView.as_view(**entry_info_dict), name= 'blog_entry_archive_month'),
url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$', DayArchiveView.as_view(**entry_info_dict), name= 'blog_entry_archive_day'),
url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', DateDetailView.as_view(**entry_info_dict), name= 'blog_entry_detail'),
)
|
[
"dave@sparkhouse.com"
] |
dave@sparkhouse.com
|
b2b0bcb274470509901337f8aafa2e13ec47fc33
|
278d7f4467a112416d1adfbcd3218033ff0fd9b3
|
/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py
|
ace084ddd452aab6f3f34cc7bc60057b5b0e2962
|
[] |
no_license
|
Young-1217/detection
|
e3d67938b454e955b5b7a82d5ae222e62f9545fb
|
6760288dac92e00ddc3e813ed0e1363c1fa1ce2d
|
refs/heads/main
| 2023-06-01T21:41:37.998947
| 2021-06-21T10:03:01
| 2021-06-21T10:03:01
| 371,868,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
_base_ = './gfl_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
total_epochs = 24
# multi-scale training
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
|
[
"noreply@github.com"
] |
Young-1217.noreply@github.com
|
f4c5184a6cca72d60a9a2cf2d98f15f9d2314811
|
f907f8ce3b8c3b203e5bb9d3be012bea51efd85f
|
/around_square.py
|
b2b414eff97fad0c386ab0b4c9d138fc545e7555
|
[] |
no_license
|
KohsukeKubota/Atcoder-practice
|
3b4b986395551443f957d1818d6f9a0bf6132e90
|
52554a2649445c2760fc3982e722854fed5b8ab1
|
refs/heads/master
| 2020-08-26T15:17:29.344402
| 2019-10-26T11:14:24
| 2019-10-26T11:14:24
| 217,052,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
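# Largest perfect square that does not exceed N.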
import math
N = int(input())
res = 0
for i in range(int(math.sqrt(N))+1):
val = i**2
if val > res:
res = val
print(res)
|
[
"kohsuke@KohsukeKubotas-MacBook-Air.local"
] |
kohsuke@KohsukeKubotas-MacBook-Air.local
|
02e4fbe4535d2b7c0983b305399c7b442082d716
|
251d56a94b0d879a07a3d47a41f21258fa452a1f
|
/soqt/lilac.py
|
4127feb783bb69597b471987d0d06cc245cd1b83
|
[] |
no_license
|
paroque28/arch4edu
|
d9bb5f5af008989454fe71677621149ae45c58cc
|
24df17749cf556ed668c3a886a698ecbdcca211c
|
refs/heads/master
| 2020-12-26T13:33:43.987122
| 2020-01-27T16:41:41
| 2020-01-27T16:41:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
#!/usr/bin/env python3
from lilaclib import *
maintainers = [{'github': 'petronny'}]
update_on = [{'aur': None}]
repo_depends = ['coin']
build_prefix = 'extra-x86_64'
pre_build = aur_pre_build
post_build = aur_post_build
if __name__ == '__main__':
single_main(build_prefix)
|
[
"i@jingbei.li"
] |
i@jingbei.li
|
e007362f985c8d2e6793d6f6d5e7ba3a6cdecbdd
|
d6254d3a0996d7977816c167bc2af76677a52b87
|
/bigsi/cmds/search.py
|
cdbdd27ce5975e51c6880a3a79b9cbab407014af
|
[
"MIT"
] |
permissive
|
rpetit3/BIGSI
|
f95c57a58e4ccfdd3d098737d76962a44565163e
|
d3e9a310e6c91c887d7917ced5609b6002a67623
|
refs/heads/master
| 2020-04-03T11:30:04.683289
| 2018-06-08T14:47:33
| 2018-06-08T14:47:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
#! /usr/bin/env python
from __future__ import print_function
# from bigsi.utils import min_lexo
from bigsi.utils import seq_to_kmers
from bigsi.graph import BIGSI as Graph
import argparse
import os.path
import time
from Bio import SeqIO
import json
import logging
import sys
logger = logging.getLogger(__name__)
from bigsi.utils import DEFAULT_LOGGING_LEVEL
logger.setLevel(DEFAULT_LOGGING_LEVEL)
import operator
from bigsi.utils import convert_query_kmer
def per(i):
return float(sum(i))/len(i)
def parse_input(infile):
gene_to_kmers = {}
with open(infile, 'r') as inf:
for record in SeqIO.parse(inf, 'fasta'):
gene_to_kmers[record.id] = str(record.seq)
yield (record.id, str(record.seq))
# return gene_to_kmers
def _search(gene_name, seq, results, threshold, graph, output_format="json", pipe=False, score=False):
if pipe:
if output_format == "tsv":
start = time.time()
result = graph.search(seq, threshold=threshold, score=score)
diff = time.time() - start
if result:
for sample_id, percent in result.items():
print(
"\t".join([gene_name, sample_id, str(round(percent["percent_kmers_found"], 2)), str(round(diff, 2))]))
else:
print("\t".join([gene_name, "NA", str(0), str(diff)]))
elif output_format == "fasta":
samples = graph.sample_to_colour_lookup.keys()
print(" ".join(['>', gene_name]))
print(seq)
result = graph.search(seq, threshold=threshold, score=score)
result = sorted(
result.items(), key=operator.itemgetter(1), reverse=True)
for sample, percent in result:
percent = round(percent * 100, 2)
colour = int(graph.sample_to_colour_lookup.get(sample))
print(
" ".join(['>', gene_name, sample, "kmer-%i coverage %f" % (graph.kmer_size, percent)]))
presence = []
for kmer in seq_to_kmers(seq, graph.kmer_size):
kmer_presence = graph.graph.lookup(
convert_query_kmer(kmer))[colour]
sys.stdout.write(str(int(kmer_presence)))
sys.stdout.write('\n')
else:
result = {}
start = time.time()
result['results'] = graph.search(
seq, threshold=threshold, score=score)
diff = time.time() - start
result['time'] = diff
print(json.dumps({gene_name: result}))
else:
results[gene_name] = {}
start = time.time()
results[gene_name]['results'] = graph.search(
seq, threshold=threshold, score=score)
diff = time.time() - start
results[gene_name]['time'] = diff
return results
def search(seq, fasta_file, threshold, graph, output_format="json", pipe=False, score=False):
if output_format == "tsv":
print("\t".join(
["gene_name", "sample_id", str("kmer_coverage_percent"), str("time")]))
results = {}
if fasta_file is not None:
for gene, seq in parse_input(fasta_file):
results = _search(
gene_name=gene, seq=seq, results=results, threshold=threshold,
graph=graph, output_format=output_format, pipe=pipe, score=score)
else:
results = _search(
gene_name=seq, seq=seq, results=results, threshold=threshold,
graph=graph, output_format=output_format, pipe=pipe, score=score)
return results
|
[
"wave@phel.im"
] |
wave@phel.im
|
56aceb4af73c59684df5c5acbffce3711e16c735
|
abcfd07772ce75f34e51592189c29cf84d1a3611
|
/flask/lib/python3.6/site-packages/sqlparse/utils.py
|
a620f2d3c17fbfba4d56e6a4d8608c27420f4868
|
[] |
no_license
|
yuhaihui3435/p_mc
|
66d89bcccf214e53729b26a0f80ddee8797e9e3e
|
3039a5c691b649fc88e941a2553b1a7e0aac2a0a
|
refs/heads/master
| 2021-06-28T18:52:00.111385
| 2017-09-15T00:26:02
| 2017-09-15T00:26:58
| 103,524,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,490
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
import itertools
import re
from collections import deque
from contextlib import contextmanager
from sqlparse.compat import text_type
# This regular expression replaces the home-cooked parser that was here before.
# It is much faster, but requires an extra post-processing step to get the
# desired results (that are compatible with what you would expect from the
# str.splitlines() method).
#
# It matches groups of characters: newlines, quoted strings, or unquoted text,
# and splits on that basis. The post-processing step puts those back together
# into the actual lines of SQL.
SPLIT_REGEX = re.compile(r"""
(
(?: # Start of non-capturing group
(?:\r\n|\r|\n) | # Match any single newline, or
[^\r\n'"]+ | # Match any character series without quotes or
# newlines, or
"(?:[^"\\]|\\.)*" | # Match double-quoted strings, or
'(?:[^'\\]|\\.)*' # Match single quoted strings
)
)
""", re.VERBOSE)
LINE_MATCH = re.compile(r'(\r\n|\r|\n)')
def split_unquoted_newlines(stmt):
"""Split a string on all unquoted newlines.
Unlike str.splitlines(), this will ignore CR/LF/CR+LF if the requisite
character is inside of a string."""
text = text_type(stmt)
lines = SPLIT_REGEX.split(text)
outputlines = ['']
for line in lines:
if not line:
continue
elif LINE_MATCH.match(line):
outputlines.append('')
else:
outputlines[-1] += line
return outputlines
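# For example (illustrative): the newline inside the quoted literal does not
# start a new output line, while the unquoted one does:
#     split_unquoted_newlines("select 'a\nb';\nselect 2;")
#     -> ["select 'a\nb';", 'select 2;']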
def remove_quotes(val):
"""Helper that removes surrounding quotes from strings."""
if val is None:
return
if val[0] in ('"', "'") and val[0] == val[-1]:
val = val[1:-1]
return val
def recurse(*cls):
"""Function decorator to help with recursion
:param cls: Classes to not recurse over
:return: function
"""
def wrap(f):
def wrapped_f(tlist):
for sgroup in tlist.get_sublists():
if not isinstance(sgroup, cls):
wrapped_f(sgroup)
f(tlist)
return wrapped_f
return wrap
def imt(token, i=None, m=None, t=None):
"""Helper function to simplify comparisons Instance, Match and TokenType
:param token:
:param i: Class or Tuple/List of Classes
:param m: Tuple of TokenType & Value. Can be list of Tuple for multiple
:param t: TokenType or Tuple/List of TokenTypes
:return: bool
"""
clss = i
types = [t, ] if t and not isinstance(t, list) else t
mpatterns = [m, ] if m and not isinstance(m, list) else m
if token is None:
return False
elif clss and isinstance(token, clss):
return True
elif mpatterns and any((token.match(*pattern) for pattern in mpatterns)):
return True
elif types and any([token.ttype in ttype for ttype in types]):
return True
else:
return False
def consume(iterator, n):
"""Advance the iterator n-steps ahead. If n is none, consume entirely."""
deque(itertools.islice(iterator, n), maxlen=0)
@contextmanager
def offset(filter_, n=0):
filter_.offset += n
yield
filter_.offset -= n
@contextmanager
def indent(filter_, n=1):
filter_.indent += n
yield
filter_.indent -= n
|
[
"125227112@qq.com"
] |
125227112@qq.com
|
ca7fe2126b290de9c15044feaa402731564a284c
|
846506a6c9023a21ff831c637f71cffd3b0aab62
|
/Python/X_Archive/AddLabelSplits.py
|
585014bdeeaa59c66a2eac711aee31123872d4e2
|
[] |
no_license
|
emonson/Weinfurt_DocFreq
|
b90378e57af7c17d32c72e5924a1b9af9f0f6584
|
1c9082d8ce4c0d002b6a65d446421840e24435fd
|
refs/heads/master
| 2020-05-29T21:05:01.397101
| 2015-01-22T13:53:53
| 2015-01-22T13:53:53
| 29,681,004
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,753
|
py
|
# After changing data labels to new dataXXX scheme, automating
# some of the sentence splits based on the Datamap...xls sheet
import xlrd
book = xlrd.open_workbook('Datamap with Sentence IDs.xls')
sh = book.sheet_by_index(0)
ids = sh.col_values(0,12)
sentences = sh.col_values(1,12)
f = open('LongVer_newLabelsPrelim_rev5.html','r')
# File has some non-ascii characters in it
whole = f.read().decode('latin_1')
for ii, id in enumerate(ids):
id = str(id)
if id.endswith(('a', 'b', 'c', 'd')):
oldStr = sentences[ii].strip()
newStr = '</font>\n<font size=${s_%s} color=${c_%s}>%s' % (id, id, oldStr)
whole = whole.replace(oldStr, newStr)
# Have to manually fix 92a and 95a since the text is identical
oldStr = 'We will keep health information and research data on secure computers. </font>\n<font size=${s_data092a} color=${c_data092a}></font>\n<font size=${s_data095a} color=${c_data095a}>'
newStr = 'We will keep health information and research data on secure computers. </font>\n<font size=${s_data092a} color=${c_data092a}>'
whole = whole.replace(oldStr, newStr)
oldStr = 'We will store this list on secure computers. </font>\n<font size=${s_data092a} color=${c_data092a}></font>\n<font size=${s_data095a} color=${c_data095a}>'
newStr = 'We will store this list on secure computers. </font>\n<font size=${s_data095a} color=${c_data095a}>'
whole = whole.replace(oldStr, newStr)
fout = open('LongVer_newLabelsPrelim2_rev5.html','w')
# for some reason must re-encode before writing
fout.write(whole.encode('latin_1'))
fout.close()
f.close()
# data179a didn't split properly for some reason
# data138b is a duplicate, so it overwrote data138
# split the 200s manually for now
# manually changed (their) mistake of data138b -> 138b
|
[
"emonson@cs.duke.edu"
] |
emonson@cs.duke.edu
|
75f33a50056f2286b145c44e8361185d4ff87561
|
16b4229a925a4e3b0e760f401d80c4d2adb793a9
|
/models/real_nvp/coupling_layer.py
|
d8b2581f2febe1eaab92fecde288e1f539fd103a
|
[
"MIT"
] |
permissive
|
ahmadkhajehnejad/real-nvp
|
4fc5a6a5f23fe076304b5652277a8de70ab4f068
|
15f51eb91388fc232334123ac11467c4d3aa3d33
|
refs/heads/master
| 2022-08-11T09:37:32.353605
| 2020-05-18T12:24:39
| 2020-05-18T12:24:39
| 261,586,549
| 0
| 0
|
MIT
| 2020-05-05T21:25:51
| 2020-05-05T21:25:50
| null |
UTF-8
|
Python
| false
| false
| 3,919
|
py
|
import torch
import torch.nn as nn
from enum import IntEnum
from models.resnet import ResNet
from util import checkerboard_mask
class MaskType(IntEnum):
CHECKERBOARD = 0
CHANNEL_WISE = 1
class CouplingLayer(nn.Module):
"""Coupling layer in RealNVP.
Args:
in_channels (int): Number of channels in the input.
mid_channels (int): Number of channels in the `s` and `t` network.
num_blocks (int): Number of residual blocks in the `s` and `t` network.
mask_type (MaskType): One of `MaskType.CHECKERBOARD` or `MaskType.CHANNEL_WISE`.
reverse_mask (bool): Whether to reverse the mask. Useful for alternating masks.
"""
def __init__(self, in_channels, mid_channels, num_blocks, mask_type, reverse_mask):
super(CouplingLayer, self).__init__()
# Save mask info
self.mask_type = mask_type
self.reverse_mask = reverse_mask
# Build scale and translate network
if self.mask_type == MaskType.CHANNEL_WISE:
in_channels //= 2
self.st_net = ResNet(in_channels, mid_channels, 2 * in_channels,
num_blocks=num_blocks, kernel_size=3, padding=1,
double_after_norm=(self.mask_type == MaskType.CHECKERBOARD))
# Learnable scale for s
self.rescale = nn.utils.weight_norm(Rescale(in_channels))
def forward(self, x, sldj=None, reverse=True):
if self.mask_type == MaskType.CHECKERBOARD:
# Checkerboard mask
b = checkerboard_mask(x.size(2), x.size(3), self.reverse_mask, device=x.device)
x_b = x * b
st = self.st_net(x_b)
s, t = st.chunk(2, dim=1)
s = self.rescale(torch.tanh(s))
s = s * (1 - b)
t = t * (1 - b)
# Scale and translate
if reverse:
inv_exp_s = s.mul(-1).exp()
if torch.isnan(inv_exp_s).any():
raise RuntimeError('Scale factor has NaN entries')
x = x * inv_exp_s - t
else:
exp_s = s.exp()
if torch.isnan(exp_s).any():
raise RuntimeError('Scale factor has NaN entries')
x = (x + t) * exp_s
# Add log-determinant of the Jacobian
sldj += s.view(s.size(0), -1).sum(-1)
else:
# Channel-wise mask
if self.reverse_mask:
x_id, x_change = x.chunk(2, dim=1)
else:
x_change, x_id = x.chunk(2, dim=1)
st = self.st_net(x_id)
s, t = st.chunk(2, dim=1)
s = self.rescale(torch.tanh(s))
# Scale and translate
if reverse:
inv_exp_s = s.mul(-1).exp()
if torch.isnan(inv_exp_s).any():
raise RuntimeError('Scale factor has NaN entries')
x_change = x_change * inv_exp_s - t
else:
exp_s = s.exp()
if torch.isnan(exp_s).any():
raise RuntimeError('Scale factor has NaN entries')
x_change = (x_change + t) * exp_s
# Add log-determinant of the Jacobian
sldj += s.view(s.size(0), -1).sum(-1)
if self.reverse_mask:
x = torch.cat((x_id, x_change), dim=1)
else:
x = torch.cat((x_change, x_id), dim=1)
return x, sldj
class Rescale(nn.Module):
"""Per-channel rescaling. Need a proper `nn.Module` so we can wrap it
with `torch.nn.utils.weight_norm`.
Args:
num_channels (int): Number of channels in the input.
"""
def __init__(self, num_channels):
super(Rescale, self).__init__()
self.weight = nn.Parameter(torch.ones(num_channels, 1, 1))
def forward(self, x):
x = self.weight * x
return x
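# Minimal shape-check sketch; channel counts and spatial sizes are arbitrary
# illustrative assumptions (requires models.resnet.ResNet to be importable).
if __name__ == '__main__':
    layer = CouplingLayer(in_channels=64, mid_channels=32, num_blocks=2,
                          mask_type=MaskType.CHANNEL_WISE, reverse_mask=False)
    x = torch.randn(4, 64, 8, 8)
    sldj = torch.zeros(4)
    x_out, sldj = layer(x, sldj, reverse=False)  # accumulate log-det of Jacobian
    print(x_out.shape, sldj.shape)               # torch.Size([4, 64, 8, 8]) torch.Size([4])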
|
[
"chutechristopher@gmail.com"
] |
chutechristopher@gmail.com
|
7f77fb3562993641b617a5b8f28eb60e5b4690d8
|
615f83418985b80f2a2a47200acb08dfa9418fc7
|
/identities/widgets.py
|
f59c91e055afcef5e61baf70b159870a655016a6
|
[
"MIT"
] |
permissive
|
alejo8591/maker
|
a42b89ddc426da326a397765dc091db45dd50d8e
|
001e85eaf489c93b565efe679eb159cfcfef4c67
|
refs/heads/master
| 2016-09-06T19:36:01.864526
| 2013-03-23T06:54:21
| 2013-03-23T06:54:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
# encoding: utf-8
# Copyright 2013 maker
# License
"""
Identities module widgets
"""
WIDGETS = {'widget_contact_me': {'title': 'My Contact Card',
'size': "95%"}}
def get_widgets(request):
"Returns a set of all available widgets"
return WIDGETS
|
[
"alejo8591@gmail.com"
] |
alejo8591@gmail.com
|
9fa1eef441427dbc31a6b3821675d6bfcc0a7512
|
da8adef15efbdacda32b19196b391f63d5026e3a
|
/ITMO/ML/Lab5/main.py
|
5fbef8d876250cd8a74ae41ce03cc26e95b369ec
|
[] |
no_license
|
rubcuadra/MachineLearning
|
05da95c1f800e6acbce97f6ca825bd7a41d806a6
|
aa13dd007a7954d50586cca6dd413a04db18ef77
|
refs/heads/master
| 2021-03-19T17:33:14.080691
| 2018-10-19T23:43:27
| 2018-10-19T23:43:27
| 100,544,903
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,440
|
py
|
'''
You should implement the feature selection algorithm based on the utility metric (the Filter method).
Implement several utility metrics and compare their performance at classification tasks.
https://en.wikipedia.org/wiki/Feature_selection
https://machinelearningmastery.com/an-introduction-to-feature-selection/
'''
#http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.chi2.html#sklearn.feature_selection.chi2
#http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.mutual_info_classif.html#sklearn.feature_selection.mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import mutual_info_classif, chi2, SelectKBest
from sklearn.metrics import accuracy_score, f1_score
from scipy.stats import chi2_contingency
from glob import glob
import pandas as pd
import numpy as np
def test_model(model,x_train,y_train,x_test,y_test):
model.fit(x_train, y_train)
pred = model.predict(x_test)
print('\t',x_train.shape )
print('\tAccuracy: ', accuracy_score(y_test, pred))
print('\tF-score: ', f1_score(y_test, pred, average='macro'))
#Folder with files/structure
# *_train.data
# *_train.labels
# *_valid.data
# *_valid.labels
def loadData(path):
X,Y,x,y = [],[],[],[]
with open( glob(f"{path}/*_train.data")[0] ,"r" ) as td: X = [ [int(v) for v in line.split()] for line in td ]
with open( glob(f"{path}/*_train.labels")[0] ,"r" ) as td: Y = [ [int(v) for v in line.split()] for line in td ]
with open( glob(f"{path}/*_valid.data")[0] ,"r" ) as td: x = [ [int(v) for v in line.split()] for line in td ]
with open( glob(f"{path}/*_valid.labels")[0] ,"r" ) as td: y = [ [int(v) for v in line.split()] for line in td ]
return (np.matrix(X),np.matrix(Y).A1,np.matrix(x),np.matrix(y).A1)
class VarianceThresh():
def __init__(self, threshold=0):
self.th = threshold
def fit(self,data):
v = np.var(data,axis=0).A1 #Get variances as vector
self.ixs = np.argwhere( v <= self.th )[:,0] #Get indexes to eliminate
def transform(self,data):
newData = []
        ixs = list(self.ixs.copy()) + [-1]  # -1 sentinel so pop(0) never runs dry
c = ixs.pop(0)
for i,col in enumerate(data.T):
if i == c: c = ixs.pop(0) #new index to remove
else: newData.append( col.A1 ) #add
return np.matrix(newData).T
class ChiSquare:
    # Chi-squared filter: test whether there is a significant difference
    # between the expected and the observed frequencies of each feature
    # against the class labels.
def __init__(self, alpha = 0.5):
self.alpha = alpha
def fit(self,data,Y):
self.ixs = []
for i, X in enumerate(data.T):
dfObserved = pd.crosstab(Y,X.A1)
chi2, p, degrfree, expected = chi2_contingency(dfObserved.values)
# self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index = self.dfObserved.index)
            if p >= self.alpha: self.ixs.append(i)  # self.ixs is the removal list, so drop features with no significant dependence on Y
def transform(self,data):
newData = []
        ixs = self.ixs + [-1]  # -1 sentinel so pop(0) never runs dry
c = ixs.pop(0)
for i,col in enumerate(data.T):
if i == c: c = ixs.pop(0) #new index to remove
else: newData.append( col.A1 ) #add
return np.matrix(newData).T
if __name__ == '__main__':
Xtrain,Ytrain,Xtest,Ytest = loadData("arcene")
#VT
VT = VarianceThresh(threshold=5000) #5000
VT.fit(Xtrain)
vtX_train = VT.transform(Xtrain) #Apply Selections
vtX_test = VT.transform(Xtest) #Apply Selections
#CHI2
    CHI = SelectKBest(score_func=chi2, k=550)  # alternative: ChiSquare(alpha=0.05)
CHI.fit(Xtrain,Ytrain)
CHIXtrain = CHI.transform(Xtrain)
CHIXtest = CHI.transform(Xtest)
#Different ML Techniques
MLT = [LogisticRegression(),RandomForestClassifier(),DecisionTreeClassifier(),SVC(kernel='linear')]
for model in MLT:
print(model.__class__.__name__)
print("\tFULL")
test_model( model, Xtrain, Ytrain, Xtest, Ytest )
print("\tVarianceThreshold")
test_model( model, vtX_train, Ytrain, vtX_test, Ytest )
print("\tCHI^2")
test_model( model, CHIXtrain, Ytrain, CHIXtest, Ytest )
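    # Illustrative addition, not in the original file: mutual_info_classif is
    # imported above but never used. A minimal sketch of a third filter that
    # keeps the k features sharing the most mutual information with the
    # labels; k=550 is assumed here only to mirror the chi-squared selector.
    MI = SelectKBest(score_func=mutual_info_classif, k=550)
    MI.fit(Xtrain, Ytrain)
    MIXtrain = MI.transform(Xtrain)
    MIXtest = MI.transform(Xtest)
    for model in MLT:
        print(model.__class__.__name__)
        print("\tMutual information")
        test_model(model, MIXtrain, Ytrain, MIXtest, Ytest)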
|
[
"rubcuadra@gmail.com"
] |
rubcuadra@gmail.com
|
09dc4b8c4826d5758b97747fe4b4d51aa543a01a
|
5053116ea6876add7903bf9433a9cf5da6aa5cbb
|
/CI_final project/rbfnet/RBF.py
|
37606ac2732850630bf4bcd555651a1ffac725a9
|
[] |
no_license
|
Yasaman1997/Computatioanal-Intelligence
|
4c400643bc4eb8ab30e51fc1fe7e76cf2d2ca2e5
|
65607a5d3ff6e08269584f9055f4feba5358abe2
|
refs/heads/master
| 2020-12-27T07:41:18.100841
| 2020-02-02T19:29:28
| 2020-02-02T19:29:28
| 237,819,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,066
|
py
|
import math
import matplotlib.pyplot as plt
import numpy as np
import FCM
import code
class RBF:
def __init__(self, path, clusters, fuzziness_parameter, gama, n_class):
        self.path = path
self.dataset = code.prepare_data()
self.n_cluster = clusters
self.m = fuzziness_parameter
self.n_class = n_class
self.c_raduis = gama
self.G_matrix = np.array([[0.0 for i in range(self.n_cluster)] for j in range(int(len(self.dataset) * 0.7))])
self.Y_matrix = np.array([[0 for i in range(self.n_class)] for j in range(int(len(self.dataset) * 0.7))])
self.W_matrix = None
self.G_matrix_test = np.array(
[[0.0 for i in range(self.n_cluster)] for j in range(int(len(self.dataset) - len(self.dataset) * 0.7))])
self.Y = [0.0 for i in range(int(len(self.dataset) - len(self.dataset) * 0.7))]
self.Output_matrix = np.array([[0.0 for i in range(self.n_class)] for j in range(int(len(self.dataset) * 0.7))])
def distance(self, point1, point2):
d = math.sqrt(sum((a - b) ** 2 for a, b in zip(point1, point2)))
if d == 0:
return 0.00000001
else:
return d
def get_uik(self, x, vi):
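        # Fuzzy c-means membership of point x in cluster vi:
        #   u_ik = 1 / sum_k (d(x, v_i) / d(x, c_k)) ** (2 / (m - 1))
        # where m is the fuzziness parameter and c_k ranges over all centers.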
T1 = 0
T2 = float((self.distance(x, vi)))
for ck in self.C_matrix:
T3 = float(self.distance(x, ck))
T1 += pow(float(T2 / T3), 2 / (self.m - 1))
uik = 1 / T1
return uik
def compute_G(self, start, end, G):
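        # Fill the RBF design matrix over dataset[start:end]: each entry is
        # exp(-gamma * (x - c_i)^T C_i^{-1} (x - c_i)), where C_i is a
        # membership-weighted covariance estimate for cluster i.
        # G == 0 targets the training matrix, anything else the test matrix.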
g1 = []
g2 = []
for i in range(len(self.C_matrix)):
ci = np.array([[0.0, 0.0],
[0.0, 0.0]])
uik = 0
u = 0
for j in range(start, end):
if G == 0:
u = self.U_matrix[j - start][i]
else:
u = self.get_uik(self.dataset[j][0], self.C_matrix[i])
g = np.array([u ** self.m * self.dataset[j][0][0],
u ** self.m * self.dataset[j][0][1]]) - \
np.array([u ** self.m * float(self.C_matrix[i][0]),
u ** self.m * float(self.C_matrix[i][1])])
ci += [[g[0] ** 2, g[0] * g[1]], [g[0] * g[1], g[1] ** 2]]
uik += (u ** self.m)
ci = ci / uik
for j in range(start, end):
x = np.array([self.dataset[j][0][0],
self.dataset[j][0][1]])
if G == 0:
self.G_matrix[j - start][i] = math.exp(
-self.c_raduis * np.matmul(np.matmul(np.transpose(x - self.C_matrix[i]),
np.linalg.inv(ci)),
x - self.C_matrix[i]))
# g1.append(self.G_matrix)
# np.savetxt("G1.txt", g1)
else:
self.G_matrix_test[j - start][i] = math.exp(
-self.c_raduis * np.matmul(np.matmul(np.transpose(x - self.C_matrix[i]),
np.linalg.inv(ci)),
x - self.C_matrix[i]))
# g2.append(self.G_matrix_test)
# np.savetxt("G2.txt", g2)
def Run_Rbf(self):
np.random.shuffle(self.dataset)
for i in range(int(len(self.dataset) * 0.7)):
self.Y_matrix[i][self.dataset[i][1] - 1] = 1
        fcm = FCM.FCM(self.n_cluster, self.dataset[0:int(len(self.dataset) * 0.7)], self.m)  # use FCM on the training split
self.U_matrix, self.C_matrix = fcm.clustering_algorithm()
self.compute_G(0, int(len(self.dataset) * 0.7), 0)
self.W_matrix = np.matmul(np.matmul(np.linalg.inv(np.matmul(np.transpose(self.G_matrix),
self.G_matrix)), np.transpose(self.G_matrix)),
self.Y_matrix)
self.Output_matrix = np.matmul(self.G_matrix, self.W_matrix)
print('W_matrix:')
print(self.W_matrix)
print('output:')
print(self.Output_matrix)
def rbf_test(self):
self.compute_G(int(len(self.dataset) * 0.7) + 1, len(self.dataset), 1)
self.Output_matrix = np.matmul(self.G_matrix_test, self.W_matrix)
# print(self.dataset[int(len(self.dataset) * 0.7)+1:len(self.dataset)])
for i in range(len(self.Output_matrix)):
self.Y[i] = np.argmax(self.Output_matrix[i]) + 1
print('y:')
print(self.Y)
print('predicted_output:')
print(self.Output_matrix)
def accuracy(self):
        errors = 0.0  # renamed from `sum`, which shadowed the builtin
        acc = []
        start = int(len(self.dataset) * 0.7) + 1
        end = len(self.dataset)
        for i in range(start, end):
            dif = self.dataset[i][1] - self.Y[i - start]
            # plt.scatter(self.Y[i - start], c='green')
            # plt.scatter(self.dataset[i][1], c='red')
            # plt.show()  # leftover from the commented-out plotting above
            if dif != 0:
                errors += 1
        accuracy = 1 - errors / int(len(self.dataset) * 0.3)
acc.append(accuracy)
np.savetxt("acc.txt", acc)
print('accuracy:')
print(accuracy)
def run():
for i in range(2, 32, 2):
rbf = RBF("2clstrain1200.csv", 10, 2, 1, 2)
rbf.Run_Rbf()
rbf.rbf_test()
plt.scatter([rbf.C_matrix[0][0], rbf.C_matrix[1][0], rbf.C_matrix[2][0], rbf.C_matrix[3][0], rbf.C_matrix[4][0],
rbf.C_matrix[5][0], rbf.C_matrix[6][0], rbf.C_matrix[7][0]],
[rbf.C_matrix[0][1], rbf.C_matrix[1][1], rbf.C_matrix[2][1], rbf.C_matrix[3][1], rbf.C_matrix[4][1],
rbf.C_matrix[5][1], rbf.C_matrix[6][1], rbf.C_matrix[7][1]], color='black')
plt.show()
# print('accuracy:')
        rbf.accuracy()  # accuracy() prints its result and returns None
run()
|
[
"noreply@github.com"
] |
Yasaman1997.noreply@github.com
|
2a38b18643398080afdf4fbe89533401fdd3c67d
|
4c300a18ba13bed9e0fa933a9f6d01187e005468
|
/devrun/cmd/web.py
|
d2a193d5aa0a8dded14213a28127cad236a46f7a
|
[] |
no_license
|
smurfix/devrun
|
08def56eda1090e9489b2f3c33d2a26d9a143277
|
d6098fafc79c81d65468a6cbdaaf0b8633ebde97
|
refs/heads/master
| 2021-01-25T01:22:05.091114
| 2017-06-22T10:54:40
| 2017-06-22T10:54:40
| 94,753,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of devrun, a comprehensive controller and monitor for
## various typed code.
##
## devrun is Copyright © 2016 by Matthias Urlichs <matthias@urlichs.de>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
import sys
import asyncio
import inspect
from collections.abc import Mapping
from . import BaseCommand
from devrun.web import App
class Command(BaseCommand):
"Run a web server"
help = """\
web
-- run a web server. Usage: web [[bind-to] port]
Defaults: any 9980
"""
app = None
bindto = '0.0.0.0'
port = 9980
async def run(self, *args):
self.loop = self.opt.loop
if len(args) > 2:
print("Usage: run", file=sys.stderr)
return 1
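        # positional args: "web [bindto] port" -- the last argument is the
        # port, and when two are given the first is the bind address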
if args:
            self.port = int(args[-1])
if len(args) > 1:
self.bindto = args[0]
self.app = App(self)
await self.app.start(self.bindto,self.port)
while True:
await asyncio.sleep(9999,loop=self.loop)
async def stop(self):
if self.app is not None:
await self.app.stop()
await super().stop()
|
[
"matthias@urlichs.de"
] |
matthias@urlichs.de
|
a32077954feaaf66d2e4a23b6ccbfb1d5d8009ef
|
bcc199a7e71b97af6fbfd916d5a0e537369c04d9
|
/leetcode/solved/53_Maximum_Subarray/solution.py
|
4b9dd9a6ff3ffd63395f61fdf9315a2b1fb9c7b9
|
[] |
no_license
|
sungminoh/algorithms
|
9c647e82472905a2c4e505c810b622b734d9d20d
|
1389a009a02e90e8700a7a00e0b7f797c129cdf4
|
refs/heads/master
| 2023-05-01T23:12:53.372060
| 2023-04-24T06:34:12
| 2023-04-24T06:34:12
| 87,406,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <smoh2044@gmail.com>
#
# Distributed under terms of the MIT license.
"""
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
A subarray is a contiguous part of an array.
Example 1:
Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Example 2:
Input: nums = [1]
Output: 1
Example 3:
Input: nums = [5,4,-1,7,8]
Output: 23
Constraints:
1 <= nums.length <= 10^5
-10^4 <= nums[i] <= 10^4
Follow up: If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.
"""
import sys
from typing import List
import pytest
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
"""
Time complexity: O(n)
Space complexity: O(1)
"""
ret = nums[0]
prevsum = nums[0]
for n in nums[1:]:
prevsum = max(n, prevsum + n)
ret = max(ret, prevsum)
return ret
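    # Illustrative sketch for the follow-up in the docstring, not part of
    # the original solution: a divide-and-conquer variant. Split the array,
    # recurse on each half, and combine with the best sum that crosses the
    # midpoint. Runs in O(n log n) time.
    def maxSubArrayDC(self, nums: List[int]) -> int:
        def helper(lo: int, hi: int) -> int:
            """Max subarray sum within nums[lo:hi] (hi exclusive)."""
            if hi - lo == 1:
                return nums[lo]
            mid = (lo + hi) // 2
            # best nonempty suffix of the left half
            best_left, s = -sys.maxsize, 0
            for i in range(mid - 1, lo - 1, -1):
                s += nums[i]
                best_left = max(best_left, s)
            # best nonempty prefix of the right half
            best_right, s = -sys.maxsize, 0
            for i in range(mid, hi):
                s += nums[i]
                best_right = max(best_right, s)
            return max(helper(lo, mid), helper(mid, hi), best_left + best_right)
        return helper(0, len(nums))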
@pytest.mark.parametrize('nums, expected', [
([-2,1,-3,4,-1,2,1,-5,4], 6),
([1], 1),
([5,4,-1,7,8], 23),
])
def test(nums, expected):
assert expected == Solution().maxSubArray(nums)
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
|
[
"smoh2044@gmail.com"
] |
smoh2044@gmail.com
|
eeed637ae329962457dea76e8a46dd6381f588f2
|
1832a909b2c564bc623bca36dd3eea8c5587e2db
|
/server/core/migrations/0007_auto_20170206_0100.py
|
30aa6f8b8b22f8af086a24dce4fe314c5837a1d1
|
[] |
no_license
|
bravesoftdz/tramsdaol
|
ea4370b93ccba6ba569e948d552bab89042fb337
|
e726a00a0296454582f18d18956140c67be8cf8d
|
refs/heads/master
| 2020-03-20T02:57:25.088779
| 2017-04-01T01:31:49
| 2017-04-01T01:31:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 01:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0006_geographiccoordinate_search_address'),
]
operations = [
migrations.AlterUniqueTogether(
name='geographiccoordinate',
unique_together=set([('lat', 'lng', 'address', 'search_address')]),
),
]
|
[
"johni.douglas.marangon@gmail.com"
] |
johni.douglas.marangon@gmail.com
|
fe74a07943cbbc198295913a72e0c97901d333e8
|
4bbb67ae8d51c29641b153371e1f404b9af404f0
|
/tetris/tetris.py
|
d54e00fbee9dd349be46d1875ea764db646c4d88
|
[] |
no_license
|
timurbakibayev/python_advanced
|
e328dd3b26d4b94a05e9218be483e97b149fa8bf
|
da7beace64e2c17c447efec314d757f8181b6acf
|
refs/heads/master
| 2023-01-30T04:07:19.296989
| 2020-12-08T12:47:34
| 2020-12-08T12:47:34
| 291,979,925
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,905
|
py
|
import pygame
import random
pygame.init()
colors = [
(0, 0, 0),
(120, 37, 179),
(100, 179, 179),
(80, 34, 22),
(80, 134, 22),
(180, 34, 22),
(180, 34, 122),
]
transparent = (255,255,255,40)
figures = [
[[1, 5, 9, 13], [4, 5, 6, 7]],
[[4, 5, 9, 10], [2, 6, 5, 9]],
[[6, 7, 9, 10], [1, 5, 6, 10]],
[[1, 2, 5, 9], [0, 4, 5, 6], [1, 5, 9, 8], [4, 5, 6, 10]],
[[1, 2, 6, 10], [5, 6, 7, 9], [2, 6, 10, 11], [3, 5, 6, 7]],
[[1, 4, 5, 6], [1, 4, 5, 9], [4, 5, 6, 9], [1, 5, 6, 9]],
[[1, 2, 5, 6]],
]
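# Each entry in `figures` is one tetromino given as a list of rotations;
# a rotation is the set of occupied cells of a 4x4 grid numbered row-major
# (cell index = row * 4 + col), which is how Figure.image() is decoded in
# intersects() and freeze().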
score = 0
class Figure:
def __init__(self,x,y):
self.x = x
self.y = y
self.type = random.randint(0, len(figures)-1)
self.color = random.randint(1, len(colors)-1)
self.rotation = 0
def image(self):
return figures[self.type][self.rotation]
def rotate(self):
self.rotation = (self.rotation + 1) % len(figures[self.type])
if self.intersects():
self.rotation = (self.rotation - 1) % len(figures[self.type])
def intersects(self):
for i in range(4):
for j in range(4):
p = i * 4 + j
if p in self.image():
x = self.x + j
y = self.y + i
if x > width-1:
return True
if x < 0:
return True
if y > height-1:
return True
if field[y][x] > 0:
return True
return False
def freeze(self):
global score
for i in range(4):
for j in range(4):
p = i * 4 + j
if p in self.image():
x = self.x + j
y = self.y + i
field[y][x] = self.color
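        # After locking the piece in place, clear every full row, shift the
        # rows above it down by one, and score 2 points per cleared line.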
lines = 0
for i in range(1, height):
zeros = field[i].count(0)
if zeros == 0:
lines += 1
for i1 in range(i,1,-1):
for j in range(width):
field[i1][j] = field[i1-1][j]
score += lines*2
size = (400, 500)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Tetris")
done = False
clock = pygame.time.Clock()
fps = 25
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GRAY = (128, 128, 128)
RED = (200, 0, 0)
font = pygame.font.SysFont('Calibri', 50, True, False)
text = font.render("Game Over", True, RED)
font_score = pygame.font.SysFont('Calibri', 25, True, False)
height = 20
width = 10
field = []
zoom = 20
x,y = 100,40
counter = 0
game_over = False
for i in range(height):
new_line = []
for j in range(width):
new_line.append(0)
field.append(new_line)
figure = Figure(3,0)
while not done:
# Game update
if not game_over:
counter += 1
if counter % 5 == 0:
figure.y += 1
if figure.intersects():
figure.y -= 1
figure.freeze()
figure = Figure(3,0)
if figure.intersects():
game_over = True
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if not game_over:
if event.key == pygame.K_LEFT:
figure.x -= 1
if figure.intersects():
figure.x += 1
if event.key == pygame.K_RIGHT:
figure.x += 1
if figure.intersects():
figure.x -= 1
if event.key == pygame.K_UP:
figure.rotate()
if event.key == pygame.K_DOWN or event.key == pygame.K_SPACE:
while not figure.intersects():
figure.y += 1
figure.y -= 1
screen.fill(WHITE)
for i in range(height):
for j in range(width):
pygame.draw.rect(screen, GRAY, [x + zoom * j, y + zoom * i, zoom, zoom], 1)
if field[i][j] > 0:
pygame.draw.rect(screen, colors[field[i][j]], [x + zoom * j, y + zoom * i, zoom, zoom])
if figure is not None:
for i in range(4):
for j in range(4):
p = i * 4 + j
if p in figure.image():
pygame.draw.rect(screen, colors[figure.color], [
x + zoom * (j + figure.x),
y + zoom * (i + figure.y),
zoom, zoom])
    score_pic = font_score.render(str(score), True, RED)
screen.blit(score_pic, (25, 25))
if game_over:
screen.blit(text, (100,(height*zoom+y)//2))
pygame.display.flip()
clock.tick(fps)
pygame.quit()
|
[
"timurbakibayev@gmail.com"
] |
timurbakibayev@gmail.com
|