blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
715954a2482e5085e098e307bb78aba19ebfadb5
|
38797130eaa7f4eb24ba54d5785820b6d0c40672
|
/axsemantics/resources.py
|
92f6310fbb3dbc4e9711da515f493a71b26d0e80
|
[
"MIT"
] |
permissive
|
niklaskks/axsemantics-python
|
92cf438fc3250da68e605efd782935109e84994c
|
5f80ec8e91be040a7fc409b44f321f666a351396
|
refs/heads/master
| 2021-01-19T18:01:02.925525
| 2016-05-23T09:15:36
| 2016-05-23T09:15:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,143
|
py
|
from axsemantics import constants
from axsemantics.base import (
APIResource,
ListResource,
)
from axsemantics.mixins import(
ContentGenerationMixin,
CreateableMixin,
DeleteableMixin,
ListableMixin,
UpdateableMixin,
)
from axsemantics.utils import create_object
class ThingList(ListResource):
    """Paginated collection of 'thing' resources scoped to one content project.

    Iteration pulls pages lazily: when the in-memory page is exhausted,
    __next__ fetches the next page from the API before yielding more items.
    """
    class_name = 'thing'

    def __init__(self, cp_id, *args, **kwargs):
        # cp_id: id of the owning content project; forwarded to every
        # object constructed by __next__.
        self.cp_id = cp_id
        super(ThingList, self).__init__(*args, **kwargs)

    def __next__(self):
        # End of the current page: refill from the API if there is a next
        # page (self._update() is expected to extend current_list /
        # adjust current_index — defined on ListResource), else stop.
        if self.current_index >= len(self.current_list):
            if self.next_page:
                self._update()
            else:
                raise StopIteration
        self.current_index += 1
        # Wrap the raw API dict in a typed object; create_object dispatches on _type.
        return create_object(self.current_list[self.current_index - 1], api_token=self.api_token, _type=self.class_name, cp_id=self.cp_id)
class Thing(CreateableMixin, UpdateableMixin, DeleteableMixin, ListableMixin, ContentGenerationMixin, APIResource):
    """A single 'thing' resource belonging to one content project.

    Supports create/update/delete/list and content generation via mixins.
    """
    class_name = 'thing'
    required_fields = ['uid', 'name', 'content_project']
    list_class = ThingList

    def __init__(self, cp_id=None, **kwargs):
        super(Thing, self).__init__(**kwargs)
        # Store the owning project's id under the field the API expects.
        self['content_project'] = cp_id

    def instance_url(self):
        """Return the API URL for this thing (collection URL if it has no id)."""
        base = '/{}/content-project/{}/thing/'.format(
            constants.API_VERSION,
            self['content_project'],
        )
        object_id = self['id']
        if not object_id:
            return base
        return base + '{}/'.format(object_id)
class ContentProject(CreateableMixin, DeleteableMixin, ListableMixin, ContentGenerationMixin, APIResource):
    """An AX Semantics content project resource (container for things)."""
    class_name = 'content-project'
    required_fields = ['name', 'engine_configuration']

    def __init__(self, api_token=None, **kwargs):
        super(ContentProject, self).__init__(api_token=api_token, **kwargs)

    def things(self):
        # Returns a ThingList for this project; implicitly returns None when
        # the project has no id yet (never saved to the API).
        if self['id']:
            thing_url = '{}thing/'.format(self.instance_url())
            # NOTE(review): class_name passed here is self.class_name
            # ('content-project'), while ThingList itself declares
            # class_name = 'thing' — confirm which value ListResource expects.
            return ThingList(cp_id=self['id'], api_token=self.api_token, class_name=self.class_name, initial_url=thing_url)
class ContentProjectList(ListResource):
    """Paginated list of all content projects visible to the API token."""
    initial_url = ContentProject.class_url()
    class_name = 'content-project'
|
[
"rixx@cutebit.de"
] |
rixx@cutebit.de
|
f7a79c683a39a157ca334486bf2720da61880c5f
|
f0fa96d39a66c3ddaae4266442a13ec3feb7a462
|
/binary_search/ceil_ele_sorted_arr.py
|
b38b7525c50fb69b6e0caad422b862b18198fcdd
|
[] |
no_license
|
ashishgupta2014/problem_solving_practices
|
14d587e98d9996a95efe822335ca4baccb39b1a1
|
bc4f4b07e1e33273010e34428e0c31d2d6656c14
|
refs/heads/master
| 2023-04-26T03:47:40.766508
| 2021-06-07T04:55:52
| 2021-06-07T04:55:52
| 298,063,915
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
def binay_search_itr(arr, x):
    """Return the ceiling of ``x`` in the sorted array ``arr``.

    The ceiling is the smallest element of ``arr`` greater than or equal to
    ``x`` (see https://www.geeksforgeeks.org/ceiling-in-a-sorted-array/).
    ``arr`` must be sorted in non-decreasing order.

    Examples, for arr = [1, 2, 8, 10, 10, 12, 19]:
        x = 0  -> 1   (every element is >= 0)
        x = 1  -> 1   (exact match)
        x = 5  -> 8   (smallest element >= 5)
        x = 20 -> -1  (no element >= 20)

    :param arr: sorted (non-decreasing) sequence of comparable values
    :param x: value whose ceiling is sought
    :return: the ceiling element, or -1 when x exceeds every element
    """
    lo = 0
    hi = len(arr) - 1
    res = -1
    while lo <= hi:
        # BUG FIX: the original computed `m = l + (h - l // 2)`, i.e.
        # l + h - l//2 due to precedence. That midpoint can run past `hi`
        # (IndexError for e.g. lo=4, hi=6 on a 7-element array) and degrades
        # the search toward linear. Correct overflow-safe midpoint below.
        mid = lo + (hi - lo) // 2
        if arr[mid] == x:
            return arr[mid]
        elif arr[mid] > x:
            # arr[mid] is a candidate ceiling; keep it and search left
            # for a smaller qualifying element.
            res = arr[mid]
            hi = mid - 1
        else:
            lo = mid + 1
    return res
print(binay_search_itr([1, 2, 8, 10, 10, 12, 19], 5))
|
[
"ashish.2007g@gmail.com"
] |
ashish.2007g@gmail.com
|
f2db1d747f336b5f33ca131fd532125e465c57d1
|
12f83344cdfe561db39ad9106dbf263ccd919f7e
|
/Projects/miami_metro/social_discovery/pipeline_constants.py
|
f6e86c7a8ce6044eaf24b5533b5971606e998a59
|
[] |
no_license
|
TopWebGhost/Angular-Influencer
|
ebcd28f83a77a92d240c41f11d82927b98bcea9e
|
2f15c4ddd8bbb112c407d222ae48746b626c674f
|
refs/heads/master
| 2021-01-19T10:45:47.039673
| 2016-12-05T01:59:26
| 2016-12-05T01:59:26
| 82,214,998
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
# Queues for the Pipeline module categories.
CREATORS_QUEUE_NAME = 'profiles_pipeline_creators'
CLASSIFIERS_QUEUE_NAME = 'profiles_pipeline_classifiers'
PROCESSORS_QUEUE_NAME = 'profiles_pipeline_processors'
UPGRADERS_QUEUE_NAME = 'profiles_pipeline_upgraders'
CONNECT_PROFILES_QUEUE_NAME = 'profiles_pipeline_connect_to_influencers'

# Queues for the youtube-link-in-profiles tasks.
YOUTUBE_CREATORS_QUEUE_NAME = 'profiles_pipeline_creators_youtube'
YOUTUBE_CLASSIFIERS_QUEUE_NAME = 'profiles_pipeline_classifiers_youtube'
YOUTUBE_PROCESSORS_QUEUE_NAME = 'profiles_pipeline_processors_youtube'
YOUTUBE_UPGRADERS_QUEUE_NAME = 'profiles_pipeline_upgraders_youtube'
YOUTUBE_PIPELINE_QUEUE_NAME = 'social_profiles_pipeline_youtube'

QUEUE_TO_REFETCH_PROFILES = 'social_profiles_refetch_queue'

# Name of the queue for pipelines' tasks (obsolete?).
PIPELINE_QUEUE_NAME = 'social_profiles_pipeline'

# For different types of reprocess logic.
REPROCESS_PROFILES_QUEUE_NAME = 'reprocess_profiles'

# Minimum friends count of a profile. Profiles with fewer friends are
# skipped automatically. Default value is 1000.
MINIMUM_FRIENDS_COUNT = 1000


def get_queue_name_by_pipeline_step(klassname=None):
    """Return the queue name for a pipeline step, chosen by naming convention.

    :param klassname: name of the pipeline step's class (case-insensitive)
    :return: the matching queue name, or None when klassname is not a string
    """
    if not isinstance(klassname, str):
        return None
    klassname = klassname.lower()
    # Youtube-flavoured steps get their own queue family.
    if 'haveyoutube' in klassname:
        marker_to_queue = (
            ('creator', YOUTUBE_CREATORS_QUEUE_NAME),
            ('classifier', YOUTUBE_CLASSIFIERS_QUEUE_NAME),
            ('processor', YOUTUBE_PROCESSORS_QUEUE_NAME),
            ('upgrader', YOUTUBE_UPGRADERS_QUEUE_NAME),
        )
        fallback = YOUTUBE_PIPELINE_QUEUE_NAME
    else:
        marker_to_queue = (
            ('creator', CREATORS_QUEUE_NAME),
            ('classifier', CLASSIFIERS_QUEUE_NAME),
            ('processor', PROCESSORS_QUEUE_NAME),
            ('upgrader', UPGRADERS_QUEUE_NAME),
        )
        fallback = PIPELINE_QUEUE_NAME
    for marker, queue_name in marker_to_queue:
        if marker in klassname:
            return queue_name
    return fallback
|
[
"ivanfridrich1981129@yandex.com"
] |
ivanfridrich1981129@yandex.com
|
63c74473adda43589e07e31deb7538094f80aea3
|
615e9d142587c965d4f593ce68cae1811824026d
|
/22-functions/javoblar-22-02.py
|
5232d042489e863a3b22af62d546f0f804cd2c91
|
[] |
no_license
|
XurshidbekDavronov/python-darslar
|
0100bb8ea61c355949e81d1d3f3b923befeb80c9
|
4fcf9a3e0c2facdedaed9b53ef806cdc0095fd9d
|
refs/heads/main
| 2023-06-21T03:33:19.509225
| 2021-07-13T13:04:56
| 2021-07-13T13:04:56
| 377,176,205
| 1
| 0
| null | 2021-06-15T13:40:33
| 2021-06-15T13:40:32
| null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
"""
24/12/2020
Dasturlash asoslari
#22-dars: *args va **kwargs
Muallif: Anvar Narzullaev
Web sahifa: https://python.sariq.dev
"""
def talaba_info(ism, familiya, **kwargs):
    """Return a dict describing a student.

    ``ism`` (first name) and ``familiya`` (surname) are merged into the extra
    keyword arguments; they override same-named keys if present.
    """
    info = dict(kwargs)
    info['ism'] = ism
    info['familiya'] = familiya
    return info


talaba = talaba_info('olim', 'olimov', tyil=1995, fakultet='IT', yonalish='AT')
|
[
"anvarbek@gmail.com"
] |
anvarbek@gmail.com
|
739ba7fb3598067a85c67fefdb82248cd9e6b11f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02410/s505213341.py
|
11027f146f2932df5fc0f7919c70fcbe05860869
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
# Read an n x m integer matrix and an m-length integer vector from stdin,
# then print each entry of the matrix-vector product on its own line.
n, m = map(int, input().split())
matrix = [list(map(int, input().split())) for _ in range(n)]
vector = [int(input()) for _ in range(m)]
for row in matrix:
    # Dot product of this row with the vector.
    print(sum(row[j] * vector[j] for j in range(m)))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2426d938b5176cd170534198a1082ae7f41d8643
|
0e4860fecfdd34a3255003cc8c8df086c14083dd
|
/python/source_code/source_code_of_lp3thw/ex41.py
|
da1766ed9d59b79276ec183aa6177f3cb3732590
|
[] |
no_license
|
anzhihe/learning
|
503ab9a58f280227011da5eaa4b14b46c678e6f3
|
66f7f801e1395207778484e1543ea26309d4b354
|
refs/heads/master
| 2023-08-08T11:42:11.983677
| 2023-07-29T09:19:47
| 2023-07-29T09:19:47
| 188,768,643
| 1,443
| 617
| null | 2023-08-24T02:10:34
| 2019-05-27T04:04:10
|
Python
|
UTF-8
|
Python
| false
| false
| 6,262
|
py
|
'''
# 关键词 训练:
class :告诉python你要做个新型式的东西出来。
object:有两层意思:第一,事物的最基础的类型;第二,any instance(建议、情况?) of thing.
instance:建议、情况。当你告诉 python 去创作一个 class 的时候,你得到的东西。
def :你在 class 里你定义了一个函数。
self :在 class 里的函数,self是一个为 instance、object可以被访问的一个变量。
inheritance :继承。这个概念是说一个 class 可以继承另一个 class 的特质,就像你和你的父母一样。
composition :合成。这个概念是说一个 class 可以由其他几个 class 进行合成,类似于汽车有4个轮子
attribute :特质、属性。class 所具有的特质,常常是通过合成得到的,并且通常是变量。
is-a:这是说这个东西是从其他东西合成的,或者说具有一种trait(特性),举个例子鲑鱼has-a嘴。
你最好做一些闪存卡,以更好的记住这些东西。
# 短语 训练:
1.class X(Y):制作一个 叫 X 的 class,这个 class 中有 Y(制作了一条鱼X,这条鱼有嘴 Y)。
2.class X(object):def _init_(slef,J): class X 具有一个叫做 M 的函数,这个函数具有 self和 J 两个参数。
3.foo =X():把 foo 设置给 classX 的情况。
4.foo.M(J):从 foo 里,获得 M 函数,并且 使用参数 self 和 J来call 它
5.foo.K = Q: 从 foo 里获得 K 特性,并把它这个特性赋值给 Q。
在上面这些里,当你看到 XYMJKQ 以及 foo,你可以对待他们像对待空白点一样。举个例子,你可以像下面这种方法来写:
1."制作一个 叫 ??? 的 class,这个 class 中有 Y"
2."class???具有一个_init_它具有 self 和 ???变量 "
3.class???具有一个函数,函数名为???这个函数具有 self 和???参数。
4.把 foo 设置给一个 class??? 的 instance
5.从 foo 中获得???函数,并且使用 self=???和参数???来call 它。
6.从 foo 里,得到???特质,并且把它设置赋值给???
# 联结训练:
1.拿短语卡片并且训练。
2.练习再练习。
# 一个阅读测试
我准备了一个小的 hack 代码,用来训练你。下面就是代码,这些代码你应该敲进oop_test.py来使用。
#下面是ex41.py的代码:
'''
import random
from urllib.request import urlopen
import sys
# Source word list for the drill templates; downloaded at import time below.
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
# (Translated from a Chinese comment: the author was unhappy with the
# template style below. 20180319)
# Maps a code snippet template to its English description; %%% = class name,
# *** = other name, @@@ = parameter list. convert() fills the placeholders.
PHRASES = {
    "class %%%(%%%):":
    "Make a class named %%% that is-a %%%.",
    "class %%%(object):\n\tdef __init__(self, ***)":
    "class %%% has-a __init__ that takes self and *** parameters.",
    "class %%%(object):\n\tdef ***(self, @@@)":  # not present in the code below
    "class %%% has-a function *** that takes self and @@@",  # not present in the code below
    "*** = %%%()":
    "Set *** to an instance of class %%%.",
    "***.***(@@@)":
    "From *** get the *** function, call it with parameters self,@@@.",
    "***.*** = '***'":
    "From *** get the *** attribute and set it to '***'."
}
'''
来自这了的代码http://blog.csdn.net/github_37430159/article/details/54808102
'''
# do they want to drill phrases first
if len(sys.argv) == 2 and sys.argv[1] == "english":
    PHRASE_FIRST = True
else:
    PHRASE_FIRST = False

# load up the words from the website
# NOTE(review): network access at import time — this script fails offline.
for word in urlopen(WORD_URL).readlines():
    WORDS.append(str(word.strip(),encoding = 'utf-8'))
def convert(snippet, phrase):
    """Fill one (snippet, phrase) template pair with random words.

    Placeholders: %%% becomes a capitalized class name, *** an ordinary
    name, @@@ a comma-joined list of 1-3 parameter names. The same random
    words are substituted into both templates, in placeholder order.

    Returns a two-element list: [filled_snippet, filled_phrase].
    """
    # Draw the random words up front (one batch per placeholder kind),
    # so both templates receive identical substitutions.
    cls_words = [word.capitalize() for word in random.sample(WORDS, snippet.count("%%%"))]
    obj_words = random.sample(WORDS, snippet.count("***"))
    param_lists = []
    for _ in range(snippet.count("@@@")):
        how_many = random.randint(1, 3)
        param_lists.append(','.join(random.sample(WORDS, how_many)))

    filled = []
    for template in (snippet, phrase):
        text = template[:]
        # Replace placeholders one occurrence at a time, left to right.
        for replacement in cls_words:
            text = text.replace("%%%", replacement, 1)
        for replacement in obj_words:
            text = text.replace("***", replacement, 1)
        for replacement in param_lists:
            text = text.replace("@@@", replacement, 1)
        filled.append(text)
    return filled
# keep going until they hit CTRL-D
try:
    while True:
        # Re-shuffle the question order on every pass through the drill.
        snippets = list(PHRASES.keys())
        random.shuffle(snippets)
        for snippet in snippets:
            phrase = PHRASES[snippet]
            question, answer = convert(snippet, phrase)
            # In "english" mode show the prose phrase and answer with code.
            if PHRASE_FIRST:
                question, answer = answer, question
            print(question)
            input("> ")
            print(f"ANSWER: {answer}\n\n")
except EOFError:
    # CTRL-D (end of input) ends the drill cleanly.
    print("\nBye")
'''
20180318 代码错误:
bogon:lp3thw yyy$ python ex41.py
File "ex41.py", line 63
"class %%%(object):\n\tdef ***(self, @@@)":
^
SyntaxError: invalid syntax
暂时未解决。
'''
# 一些观点20180319:
'''
看ex40和 ex41 看的恶心了,也没闹明白这个老外到底在讲什么,翻开廖雪峰大神的网站,可算明白类的概念和实例的概念了。
# 小结:面向对象最重要的概念就是类(Class)和实例(Instance),必须牢记类是抽象的模板,比如Student类,而实例是根据类创建出来的一个个具体的“对象”,每个对象都拥有相同的方法,但各自的数据可能不同。
# 参考地址:1.https://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/0014318645694388f1f10473d7f416e9291616be8367ab5000
# 2.https://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/001431864715651c99511036d884cf1b399e65ae0d27f7e000
# 另:廖神的网站做的太酷了,这种以 wiki 的形式来生成自己的知识架构是在是一件很棒的事情,我怎么来做呢?
'''
# LOG:20180319 我承认我快疯了,虽然早上看明白了廖雪峰的 class 但是看到 LP3THW 我彻底的晕菜了,晚上代码调试是通过了,但是还是浑身冒汗,这个作者写的都是些什么!!。
|
[
"anzhihe1218@gmail.com"
] |
anzhihe1218@gmail.com
|
c292d4351f077c622810d312a769321ec9aaf9cc
|
0e1e643e864bcb96cf06f14f4cb559b034e114d0
|
/Exps_7_v3/I_to_M_Gk3_no_pad/pyramid_size256/pyr_0s/bce_s001_tv_s0p1_L6/step10_a.py
|
c878c87ed7d82d627aa11997e884698d36ad3924
|
[] |
no_license
|
KongBOy/kong_model2
|
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
|
1af20b168ffccf0d5293a393a40a9fa9519410b2
|
refs/heads/master
| 2022-10-14T03:09:22.543998
| 2022-10-06T11:33:42
| 2022-10-06T11:33:42
| 242,080,692
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,896
|
py
|
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)  ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")  ### split the path to find at which level kong_model2 sits (Windows-style separators — TODO confirm this is Windows-only)
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")  ### find at which level kong_model2 sits
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys  ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print("  code_exe_path:", code_exe_path)
# print("  code_exe_path_element:", code_exe_path_element)
# print("  code_dir:", code_dir)
# print("  kong_layer:", kong_layer)
# print("  kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer  ### the -1 converts a length into an index
# print("  kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] would strip the "step1x_" prefix; meaningful names were kept, so 0 is used
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] would strip the "mask_" prefix (added because python modules cannot start with a digit); the automatic ordering was acceptable, so 0 is used
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("  template_dir:", template_dir)  ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_0side_L6 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
# Drop this script's own dir from sys.path and purge step09 modules so later
# imports in sibling experiments resolve their own copies, not this one's.
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
6_mask_unet/自己命的名字/result_a
6_mask_unet/自己命的名字/result_b
6_mask_unet/自己命的名字/...
'''
use_db_obj = type9_mask_flow_have_bg_dtd_hdr_mix_and_paper
use_loss_obj = [G_bce_s001_loss_info_builder.set_loss_target("UNet_Mask").copy()]  ### the z, y, x order follows step07_b_0b_Multi_UNet
#############################################################
### An "empty" Exp_builder so result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_img_resize( (256, 256) ).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_0side = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_img_resize( (256, 256) ).set_result_name(result_name="L6_ch032_bl_pyr_-20220403_012819")
#############################################################
if(__name__ == "__main__"):
    # NOTE(review): `time` and `start_time` are not defined anywhere visible
    # in this file — presumably injected by one of the star imports above; confirm.
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Run directly (F5, or `python step10_b1_exp_obj_load_and_train_and_test.py` with no args) — this branch avoids falling through to the code meant for step10_b_subprocess.py
        ch032_0side.build().run()
        # print('no argument')
        sys.exit()
    ### The following is for step10_b_subprocess.py — equivalent to running `python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()` from cmd
    # NOTE(review): eval of argv is arbitrary code execution; safe only for trusted callers.
    eval(sys.argv[1])
|
[
"s89334roy@yahoo.com.tw"
] |
s89334roy@yahoo.com.tw
|
d27dace65bdd94863a402b1a7e7a1fb8e6f2467c
|
48122a5eca895dd926e1568e143babb6cfbef3a8
|
/pyunit_address/__init__.py
|
d8c1e5f1e58e91c8350b9c9578bb031ad3b4be4d
|
[] |
no_license
|
pyunits/pyunit-address
|
5994b640cf837e5167bc1a97d17d83d440c6f2fd
|
f754285feaaf136c802aaf4b8b554783e50262fb
|
refs/heads/master
| 2023-04-01T21:32:47.223824
| 2021-03-31T09:54:40
| 2021-03-31T09:54:40
| 242,957,473
| 15
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2020/2/18 15:23
# @Author: Jtyoui@qq.com
from .address import Address  # address initialisation
from .addressType import AddressType  # resolve the type of an address
from .correctionAddress import correct_address  # correct a (possibly wrong) address
from .findAddress import find_address  # find addresses in text
from .supplementAddress import supplement_address  # complete a partial address
from .tool import *

# Package metadata.
__version__ = '2021.3.31'
__author__ = 'Jtyoui'
__description__ = '全国五级地址查询'
__email__ = 'jtyoui@qq.com'
__names__ = 'pyUnit_address'
__url__ = 'https://github.com/PyUnit/pyunit-address'
|
[
"jtyoui@qq.com"
] |
jtyoui@qq.com
|
2382fa20e7fc0ba8a0cc593ed64f1cbe10471611
|
737c11da973590b7ae70845128caa7ca2c03be43
|
/acorn/test/AcornTest/AcornUtil.py
|
cf04be8f05831ac48fb523f7a7565bc580cb0ff3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] |
permissive
|
hobinyoon/apache-cassandra-3.0.5-src
|
c04a0309eb52a8517c74a3526680ba9d68592fd1
|
fc6710f9ce117e22286b9f42955b3e7632844160
|
refs/heads/master
| 2020-12-21T00:02:50.636174
| 2017-05-19T14:10:27
| 2017-05-19T14:10:27
| 57,812,287
| 0
| 3
| null | 2017-05-19T14:10:28
| 2016-05-01T20:41:06
|
Java
|
UTF-8
|
Python
| false
| false
| 1,810
|
py
|
import os
import pprint
import sys
sys.path.insert(0, "/home/ubuntu/work/acorn-tools/util/python")
import Cons
import Util
sys.path.insert(0, "/home/ubuntu/work/acorn-tools/ec2")
import DescInst
def GenHostfiles():
    """Generate the pssh hostnames file and the datacenter->IP map.

    Queries EC2 for instances tagged "acorn-server" (via DescInst), keeps only
    running ones, and writes:
      .run/pssh-hostnames  - one public IP per line
      .run/dc-ip-map       - "<datacenter> <public-ip>" per line
    No-op if both files already exist.
    """
    dn = "%s/.run" % os.path.dirname(os.path.realpath(__file__))
    fn_pssh_hn = "%s/pssh-hostnames" % dn
    fn_dc_ip_map = "%s/dc-ip-map" % dn

    # Generate all files if any of them doesn't exist
    if os.path.isfile(fn_pssh_hn) and os.path.isfile(fn_dc_ip_map):
        return

    with Cons.MeasureTime("Generating host files ..."):
        sys.stdout.write(" ")
        inst_descriptions = DescInst.GetInstDescs("acorn-server")
        #Cons.P(pprint.pformat(inst_descriptions, indent=2, width=100))

        # Take only running instances. There can be other instances like "terminated".
        inst_descriptions = [a for a in inst_descriptions if a["State"]["Name"] == "running"]

        Util.RunSubp("mkdir -p %s" % dn)
        with open(fn_pssh_hn, "w") as fo:
            for inst_desc in inst_descriptions:
                fo.write("%s\n" % inst_desc["PublicIpAddress"])
        Cons.P("Created %s %d" % (fn_pssh_hn, os.path.getsize(fn_pssh_hn)))

        with open(fn_dc_ip_map, "w") as fo:
            for inst_desc in inst_descriptions:
                az = inst_desc["Placement"]["AvailabilityZone"]
                # Datacenter = availability zone minus its trailing letter (e.g. us-east-1a -> us-east-1).
                dc = az[:-1]
                ip = inst_desc["PublicIpAddress"]
                fo.write("%s %s\n" % (dc, ip))
        Cons.P("Created %s %d" % (fn_dc_ip_map, os.path.getsize(fn_dc_ip_map)))
#PRJ_ROOT=$HOME/work/pr/2n
#HOSTS_FILE=$PRJ_ROOT/conf/hosts
#PSSH_OUT_DIR=/tmp/pssh-out
#
#
#def MergeOutput():
# prefix=$1
# rm -f $prefix-all
# for f in $prefix/*
# do
# echo "["`basename $f`"]" >> $prefix-all
# cat $f >> $prefix-all
# echo "" >> $prefix-all
# done
#
#
#def CatOutput():
# merge_output $@
# prefix=$1
# cat $prefix-all
#
#function less_output {
# merge_output $@
# prefix=$1
# less -r $prefix-all
#}
|
[
"hobinyoon@gmail.com"
] |
hobinyoon@gmail.com
|
cd9ea9f6995583d04647f40306bbc383cf0ce446
|
ff3c4368081cd83b4fc30315d4ef2228d4682406
|
/pipeline/sam-calc-refcov-cmp.py
|
8efada185432d49997791096dc7dffd4d6cd2ad2
|
[
"BSD-3-Clause"
] |
permissive
|
dib-lab/2014-streaming
|
8489aaa8ab86b409865dd1cc82f6dd68397303e3
|
4873ebfb87a7a95efdb1fbd4607ffdf76e750bbb
|
refs/heads/master
| 2021-01-24T21:26:41.208661
| 2016-10-25T18:08:05
| 2016-10-25T18:08:05
| 28,547,623
| 1
| 1
| null | 2016-10-25T18:08:05
| 2014-12-27T22:14:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,711
|
py
|
#! /usr/bin/env python
import sys
import argparse
import screed
import math
def ignore_at(iter):
    """Yield lines from *iter*, skipping SAM header lines (those starting with '@')."""
    for line in iter:
        if not line.startswith('@'):
            yield line
def main():
    """Compare per-base reference coverage between two SAM alignments.

    Python 2 script. Marks, for each SAM file, which reference bases are
    covered by at least one aligned read, then prints coverage fractions for
    both files and the coverage "lost" going from samfile1 to samfile2.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('genome')
    parser.add_argument('samfile1')
    parser.add_argument('samfile2')
    args = parser.parse_args()

    # One 0/1 flag per base of every reference sequence, per SAM file.
    genome_dict1 = {}
    genome_dict2 = {}
    for record in screed.open(args.genome):
        genome_dict1[record.name] = [0] * len(record.sequence)
        genome_dict2[record.name] = [0] * len(record.sequence)

    n = 0
    n_skipped = 0
    for samline in ignore_at(open(args.samfile1)):
        n += 1
        if n % 100000 == 0:
            print >>sys.stderr, '...1', n

        # First 10 mandatory SAM columns; only name/ref/pos/seq are used.
        readname, flags, refname, refpos, _, _, _, _, _, seq = \
            samline.split('\t')[:10]
        if refname == '*' or refpos == '*':
            # (don't count these as skipped)
            continue

        refpos = int(refpos)
        try:
            ref = genome_dict1[refname]
        except KeyError:
            print >>sys.stderr, "unknown refname: %s; ignoring (read %s)" % (refname, readname)
            n_skipped += 1
            continue

        # SAM positions are 1-based; mark every base the read spans.
        for i in range(refpos - 1, refpos + len(seq) - 1):
            if i < len(ref):
                ref[i] = 1

    # Second pass: same marking for samfile2.
    # NOTE(review): n is reset here but n_skipped is not, so the >1% check
    # below mixes skips from both files against samfile2's count — confirm intended.
    n = 0
    for samline in ignore_at(open(args.samfile2)):
        n += 1
        if n % 100000 == 0:
            print >>sys.stderr, '...2', n

        readname, flags, refname, refpos, _, _, _, _, _, seq = \
            samline.split('\t')[:10]
        if refname == '*' or refpos == '*':
            # (don't count these as skipped)
            continue

        refpos = int(refpos)
        try:
            ref = genome_dict2[refname]
        except KeyError:
            print >>sys.stderr, "unknown refname: %s; ignoring (read %s)" % (refname, readname)
            n_skipped += 1
            continue

        for i in range(refpos - 1, refpos + len(seq) - 1):
            if i < len(ref):
                ref[i] = 1

    # Abort if more than 1% of reads referenced unknown sequences.
    if n_skipped / float(n) > .01:
        raise Exception, "Error: too many reads ignored! %d of %d" % \
            (n_skipped, n)

    # Tally covered bases across all reference sequences.
    total = 0.
    cov1 = 0.
    cov2 = 0.
    for name in genome_dict1:
        total += len(genome_dict1[name])
        cov1 += sum(genome_dict1[name])
        cov2 += sum(genome_dict2[name])

    print args.samfile1, float(cov1) / float(total), cov1, total
    print args.samfile2, float(cov2) / float(total), cov2, total
    print 'lost: %f' % (1.0 - float(cov2) / float(cov1),)
    print 'lost: %d of %d' % (cov1 - cov2, total)
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"titus@idyll.org"
] |
titus@idyll.org
|
be04c205e071f2ef645ebbaa5a3f066e0a5d660e
|
0d834835098f86c153367956cb2513377c91074c
|
/basic/html_table/table.py
|
d9fa2816db8db4c537b7c2ddd956d17b2a82d654
|
[] |
no_license
|
klandon94/flask_fundamentals
|
a63c2c6b5567627203299a685db5f73a8d749051
|
ee8cd8976b504a03c15696019c2091951a3a5a73
|
refs/heads/master
| 2020-03-21T06:14:53.013649
| 2018-06-21T18:20:36
| 2018-06-21T18:20:36
| 138,207,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
from flask import Flask,render_template as render

# WSGI application object; __name__ lets Flask locate templates next to this file.
app = Flask(__name__)
@app.route('/')
def table():
    """Render table.html with a fixed roster of NBA players."""
    roster = [
        ('LeBron', 'James'),
        ('Kobe', 'Bryant'),
        ('Michael', 'Jordan'),
        ('Tim', 'Duncan'),
    ]
    users = tuple({'first_name': first, 'last_name': last} for first, last in roster)
    return render('table.html', data=users)
if __name__ == "__main__":
    # debug=True enables the reloader and interactive traceback; dev only.
    app.run(debug=True)
|
[
"kenny.landon88@gmail.com"
] |
kenny.landon88@gmail.com
|
616ac500554e2c805a0e873d7f50ac2c6131a9dd
|
1ea0e2b4f064ba0de45a73c527ee89a36771e8fc
|
/src/sentry/web/frontend/project_plugins.py
|
30451ad0baa6661f163b889f712f71df204c9932
|
[
"BSD-2-Clause"
] |
permissive
|
atlassian/sentry
|
6775e59c317f20f96982e91c2b3c88c02ecbb56b
|
b937615079d7b24dc225a83b99b1b65da932fc66
|
refs/heads/master
| 2023-08-27T15:45:47.699173
| 2017-09-18T22:14:55
| 2017-09-18T22:14:55
| 103,999,066
| 1
| 5
|
BSD-3-Clause
| 2023-04-01T07:49:37
| 2017-09-18T22:38:18
|
Python
|
UTF-8
|
Python
| false
| false
| 995
|
py
|
from __future__ import absolute_import
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import plugins
from sentry.web.frontend.base import ProjectView
class ProjectPluginsView(ProjectView):
    """Project settings page that toggles plugins on or off for a project."""
    required_scope = 'project:write'

    def handle(self, request, organization, team, project):
        # GET (or an empty POST): just render the plugin list page.
        if not request.POST:
            return self.respond('sentry/projects/plugins/list.html', {
                'page': 'plugins',
            })

        # POST: checkboxes submitted under 'plugin' are the plugins to keep
        # enabled; every other configurable plugin gets disabled.
        checked = set(request.POST.getlist('plugin'))
        for plugin in plugins.configurable_for_project(project, version=None):
            if plugin.slug in checked:
                plugin.enable(project)
            else:
                plugin.disable(project)

        messages.add_message(
            request, messages.SUCCESS, _('Your settings were saved successfully.')
        )
        return self.redirect(request.path)
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
b27aa8ef9e31b0a9620b8601e53177fa99afc77b
|
0354d8e29fcbb65a06525bcac1f55fd08288b6e0
|
/clients/python-flask/generated/swagger_server/models/extension_class_impllinks.py
|
43b79fcdb2ef700ae5858714220dd95fce07aa70
|
[
"MIT"
] |
permissive
|
zhiwei55/swaggy-jenkins
|
cdc52956a40e947067415cec8d2da1425b3d7670
|
678b5477f5f9f00022b176c34b840055fb1b0a77
|
refs/heads/master
| 2020-03-06T20:38:53.012467
| 2018-02-19T01:53:33
| 2018-02-19T01:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,322
|
py
|
# coding: utf-8
from __future__ import absolute_import
from swagger_server.models.link import Link
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class ExtensionClassImpllinks(Model):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, _self: Link=None, _class: str=None):
        """
        ExtensionClassImpllinks - a model defined in Swagger

        :param _self: The _self of this ExtensionClassImpllinks.
        :type _self: Link
        :param _class: The _class of this ExtensionClassImpllinks.
        :type _class: str
        """
        # Attribute name -> declared Swagger type (used by the (de)serializer).
        self.swagger_types = {
            '_self': Link,
            '_class': str
        }
        # Attribute name -> JSON key on the wire ('_self' is emitted as 'self').
        self.attribute_map = {
            '_self': 'self',
            '_class': '_class'
        }
        # Double-underscore names are mangled to _ExtensionClassImpllinks__self /
        # _ExtensionClassImpllinks__class; the properties below read/write them.
        self.__self = _self
        self.__class = _class

    @classmethod
    def from_dict(cls, dikt) -> 'ExtensionClassImpllinks':
        """
        Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The ExtensionClassImpllinks of this ExtensionClassImpllinks.
        :rtype: ExtensionClassImpllinks
        """
        return deserialize_model(dikt, cls)

    @property
    def _self(self) -> Link:
        """
        Gets the _self of this ExtensionClassImpllinks.

        :return: The _self of this ExtensionClassImpllinks.
        :rtype: Link
        """
        return self.__self

    @_self.setter
    def _self(self, _self: Link):
        """
        Sets the _self of this ExtensionClassImpllinks.

        :param _self: The _self of this ExtensionClassImpllinks.
        :type _self: Link
        """
        self.__self = _self

    @property
    def _class(self) -> str:
        """
        Gets the _class of this ExtensionClassImpllinks.

        :return: The _class of this ExtensionClassImpllinks.
        :rtype: str
        """
        return self.__class

    @_class.setter
    def _class(self, _class: str):
        """
        Sets the _class of this ExtensionClassImpllinks.

        :param _class: The _class of this ExtensionClassImpllinks.
        :type _class: str
        """
        self.__class = _class
|
[
"cliffano@gmail.com"
] |
cliffano@gmail.com
|
d445f3dfb6f9d07858e859c69bbd06685a4c2ee4
|
13faa0d553ed6c6a57791db3dfdb2a0580a1695b
|
/CodeChef/Practice/Beginner/LONGSEQ.py
|
54638200608b86934ba675bf7674e2b4f9055ec4
|
[] |
no_license
|
kautsiitd/Competitive_Programming
|
ba968a4764ba7b5f2531d03fb9c53dc1621c2d44
|
a0d8ae16646d73c346d9ce334e5b5b09bff67f67
|
refs/heads/master
| 2021-01-17T13:29:52.407558
| 2017-10-01T09:58:23
| 2017-10-01T09:58:23
| 59,496,650
| 0
| 0
| null | 2017-05-20T17:27:18
| 2016-05-23T15:56:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 141
|
py
|
# Python 2 solution for CodeChef LONGSEQ: a binary string qualifies when it
# contains exactly one '0' or exactly one '1'.
for _ in range(input()):  # first line: number of test cases (py2 input() evals it)
    a = raw_input()
    if a.count('0') == 1 or a.count('1') == 1:
        print "Yes"
    else:
        print "No"
|
[
"kautsiitd@gmail.com"
] |
kautsiitd@gmail.com
|
4c36bb6674d30229ecd48b9147a7917e4d314c33
|
b7cb542ba2a0b00472aedda3a47a42f34ed01feb
|
/blogApp/admin.py
|
d6cd851510d7f6c427df2426cb543fe1f321f30b
|
[] |
no_license
|
sajibuzzaman/Django_Rest_Framework
|
1c2f5bc00ef5d4b380cc94d60c66a77bdf69a955
|
c83cce921ad41f82c7ac960000ae1c0e2e2c5604
|
refs/heads/master
| 2023-04-04T14:30:28.609151
| 2021-04-20T11:58:10
| 2021-04-20T11:58:10
| 359,554,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
from django.contrib import admin
from .models import Blog, CommentBlog


class CommentBlogInline(admin.TabularInline):
    """Read-only inline listing of a blog's comments on the Blog change page."""
    model = CommentBlog
    extra = 0  # no empty extra comment forms
    readonly_fields = ['blog', 'user', 'comment', 'ip']


class BlogAdmin(admin.ModelAdmin):
    """Admin for blog posts, with their comments shown inline."""
    list_display = ['title', 'date_posted', 'image_tag']
    list_filter = ['date_posted']
    inlines = [CommentBlogInline]
    list_per_page = 10


class CommentBlogAdmin(admin.ModelAdmin):
    """Moderation view for comments; the content fields are read-only."""
    readonly_fields = ['blog', 'user', 'comment', 'ip']
    list_display = ['blog', 'status', 'created_at', 'updated_at', 'user']
    list_filter = ['status', 'created_at']
    list_per_page = 10


# Register your models here.
admin.site.register(Blog, BlogAdmin)
# Fix: CommentBlogAdmin was defined but never registered, so comments could
# not be moderated from the admin site at all.
admin.site.register(CommentBlog, CommentBlogAdmin)
|
[
"muhammadsajibuzzaman1998@gmail.com"
] |
muhammadsajibuzzaman1998@gmail.com
|
a6d97883d813063e9e64c30383d463c3795377ed
|
d66993b0383ee7a97c9d5fe761269a3cb8e67e22
|
/Ejercicios/EjercicioAños.py
|
437838d33b904ab1f6d4be9893aad03aede04c48
|
[] |
no_license
|
rramosaveros/CursoPythonCisco
|
09828e3d8190490c0dc30861ae241f5222e108d6
|
1508e67873adfcf31b8c78d3f5cb2a0572dfeb1c
|
refs/heads/master
| 2023-06-27T12:07:52.652780
| 2021-08-01T14:19:19
| 2021-08-01T14:19:19
| 391,647,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
# Classify a year as common/leap under the Gregorian calendar rules.
año = int(input("Introduzca un año:"))
#
# Common year: not divisible by 4, or a century year not divisible by 400.
# Years up to 1580 predate the Gregorian calendar entirely.
if año <= 1580:
    print("No dentro del período del calendario gregoriano")
elif año % 4 != 0 or (año % 100 == 0 and año % 400 != 0):
    print("Año común")
else:
    print("Año bisiesto")
#
|
[
"ramoslenin32@gmail.com"
] |
ramoslenin32@gmail.com
|
9ae571f70426fb357555e597a1796b30b120411c
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_04_01/aio/operations/_operations.py
|
523f0e7792ab8919d6a9be76ddbebb382332f883
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 6,073
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_resource_skus_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResourceSkusOperations:
    """Async operations for enumerating Microsoft.Compute resource SKUs.

    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.compute.v2019_04_01.aio.ComputeManagementClient`'s
        :attr:`resource_skus` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Autorest-generated wiring: the client, config, serializer and
        # deserializer are injected positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, *, filter: Optional[str] = None, **kwargs: Any) -> AsyncIterable["_models.ResourceSku"]:
        """Gets the list of Microsoft.Compute SKUs available for your Subscription.

        :keyword filter: The filter to apply on the operation. Only **location** filter is supported
            currently. Default value is None.
        :paramtype filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceSku or the result of cls(response)
        :rtype:
            ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_04_01.models.ResourceSku]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-04-01"))
        cls: ClsType[_models.ResourceSkusResult] = kwargs.pop("cls", None)

        # Maps HTTP status codes to the exception types callers may catch;
        # callers can extend/override the mapping via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated request. Later pages: reuse the
            # server-provided next_link, re-stamped with this client's
            # api-version and with its query re-quoted.
            if not next_link:
                request = build_resource_skus_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("ResourceSkusResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is raised as an error.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus"}
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
f44e8f2bbb168252f1c63bf3eaa369e9b9bca476
|
ded1edb8ed387f9d1989334b8dd3ee397b36bf8b
|
/djangorest/api/views.py
|
317904089ccab3c785a6d5359f3be0fc1b499057
|
[] |
no_license
|
elevenmunki/django_api
|
c790dfe6c8ab1d8da9454a9fc17d3bb339a7d39c
|
3a16214f18d015d4ce21f42a473f9d69274aab80
|
refs/heads/master
| 2021-07-12T02:51:30.513141
| 2017-09-30T01:16:21
| 2017-09-30T01:16:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework import generics
from .serializers import BucketlistSerializer
from .models import Bucketlist

# ListCreateAPIView is a generic view which provides GET (list all) and
# POST method handlers.
class CreateView(generics.ListCreateAPIView):
    """This class defines the create behavior of our rest api."""
    queryset = Bucketlist.objects.all()
    serializer_class = BucketlistSerializer

    def perform_create(self, serializer):
        """Save the post data when creating a new bucketlist."""
        serializer.save()
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
d07a9c45cb065bb11701d18947cbb54a96100d40
|
e3742e43ea3ca59016406d3c4308c21fad07d3d5
|
/Basics/Shanbey/P30_章节回顾_2018世界杯各队进球数.py
|
3fdd8891e6dc9a305211d850685b3a222863e75c
|
[] |
no_license
|
yangyang0126/PythonLearning
|
e499c59ce04e884c3614c6a8c6a5b219234dce6c
|
4a8ec4386ecb7609abb56c533131e4c283b628ec
|
refs/heads/master
| 2020-08-10T18:15:18.720145
| 2020-05-24T02:29:19
| 2020-05-24T02:29:19
| 214,393,778
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,242
|
py
|
'''
goal_record below holds the scores of all 64 matches of the 2018 FIFA
World Cup in Russia (knock-out-stage penalty shoot-outs excluded).

Build the dict wc_2018_goal mapping each of the 32 teams to the total
number of goals it scored, then print one "team goals" line per team.
'''
goal_record = """Russia 5–0 Saudi Arabia;
Egypt 0–1 Uruguay;
Russia 3–1 Egypt;
Uruguay 1–0 Saudi Arabia;
Uruguay 3–0 Russia;
Saudi Arabia 2–1 Egypt;
Morocco 0–1 Iran;
Portugal 3–3 Spain;
Portugal 1–0 Morocco;
Iran 0–1 Spain;
Iran 1–1 Portugal;
Spain 2–2 Morocco;
France 2–1 Australia;
Peru 0–1 Denmark;
Denmark 1–1 Australia;
France 1–0 Peru;
Denmark 0–0 France;
Australia 0–2 Peru;
Argentina 1–1 Iceland;
Croatia 2–0 Nigeria;
Argentina 0–3 Croatia;
Nigeria 2–0 Iceland;
Nigeria 1–2 Argentina;
Iceland 1–2 Croatia;
Costa Rica 0–1 Serbia;
Brazil 1–1 Switzerland;
Brazil 2–0 Costa Rica;
Serbia 1–2 Switzerland;
Serbia 0–2 Brazil;
Switzerland 2–2 Costa Rica;
Germany 0–1 Mexico;
Sweden 1–0 South Korea;
South Korea 1–2 Mexico;
Germany 2–1 Sweden;
South Korea 2–0 Germany;
Mexico 0–3 Sweden;
Belgium 3–0 Panama;
Tunisia 1–2 England;
Belgium 5–2 Tunisia;
England 6–1 Panama;
England 0–1 Belgium;
Panama 1–2 Tunisia;
Colombia 1–2 Japan;
Poland 1–2 Senegal;
Japan 2–2 Senegal;
Poland 0–3 Colombia;
Japan 0–1 Poland;
Senegal 0–1 Colombia;
France 4–3 Argentina;
Uruguay 2–1 Portugal;
Spain 1–1 Russia;
Croatia 1–1 Denmark;
Brazil 2–0 Mexico;
Belgium 3–2 Japan;
Sweden 1–0 Switzerland;
Colombia 1–1 England;
Uruguay 0–2 France;
Brazil 1–2 Belgium;
Sweden 0–2 England;
Russia 2–2 Croatia;
France 1–0 Belgium;
Croatia 2–1 England;
Belgium 2–0 England;
France 4–2 Croatia"""
# Each match line looks like "Home H–A Away"; the en dash "–" sits between
# the two single-digit scores, so "Home H" is left of it and "A Away" right.
wc_2018_goal = {}
for match in goal_record.split(";\n"):
    left, right = match.split("–")
    home, home_goals = left[:-2], int(left[-1])
    away, away_goals = right[2:], int(right[0])
    wc_2018_goal[home] = wc_2018_goal.get(home, 0) + home_goals
    wc_2018_goal[away] = wc_2018_goal.get(away, 0) + away_goals
for team, goals in wc_2018_goal.items():
    print(team, goals)
|
[
"zhaojingyi0126@163.com"
] |
zhaojingyi0126@163.com
|
50b8d4da6ad76e44d8f29598f31da8e0096c0e67
|
a397ac42dab5e68342a412a0b00cbcf401663d13
|
/multithreadingTest.py
|
5a334e82563ae69481bcbf69394dba9799268153
|
[] |
no_license
|
declanoller/rpi_camera1
|
b2fdc2798e3c44980a0d09ac1414a65e06ddc821
|
5dff10b9745b660d21a23dd4a9d42be34d1340b1
|
refs/heads/master
| 2020-03-21T09:58:25.576696
| 2018-08-07T22:13:14
| 2018-08-07T22:13:14
| 138,427,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
from multiprocessing import Pool, Value
import datetime
from time import sleep
import os
def processFile(fName):
    """Report that *fName* was handled, stamped with the current time."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    message = "Processed file " + fName + " at time " + stamp + "\n"
    print(message)
def fileMonitor(dir):
    """Poll *dir* every 0.5 s forever, running processFile() once per file.

    Prints a notice whenever the number of directory entries grows.
    Never returns; intended to run inside a worker process.
    """
    print("entering filemonitor")
    processedFiles = []
    files = os.listdir(dir)
    nFiles = len(files)
    print("there are {} files in the directory.".format(nFiles))
    while True:
        sleep(.5)
        files = os.listdir(dir)
        if len(files)>nFiles:
            print("new files found")
            nFiles = len(files)
            print("this many files now:",nFiles)
        # Fix: the original used list comprehensions purely for their side
        # effects (the built lists were discarded) and shadowed the builtin
        # `file`; a plain loop does the same work explicitly.
        for fname in files:
            if fname not in processedFiles:
                processFile(fname)
                processedFiles.append(fname)
def fileSaver(path):
    """Every 3 seconds, create a timestamped .txt file under *path*, forever."""
    print("entering filesaver")
    while True:
        sleep(3)
        stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        fName = path + "/" + stamp + ".txt"
        print("created new file " + fName + " at time " + stamp + "\n")
        with open(fName, 'w') as handle:
            handle.write("created new file " + fName + " at time " + stamp + "\n")
# Demo entry point: takes the watched directory as the single CLI argument,
# then runs the watcher and the writer concurrently in a 2-worker pool.
if len(os.sys.argv)>1:
    dir = os.sys.argv[1]
    print(dir)
    print(len(dir))
    print(len((str(dir),)))
    pool = Pool(processes=2)
    p1 = pool.apply_async(fileMonitor,args=(dir,))
    p2 = pool.apply_async(fileSaver,args=(dir,))
    # Both workers loop forever, so these .get() calls end in a
    # multiprocessing.TimeoutError after 30 s, which stops the demo.
    print(p1.get(timeout=30))
    print(p2.get(timeout=30))
|
[
"declanoller@gmail.com"
] |
declanoller@gmail.com
|
bcf1792361959fe827e7c081b4078be6ae08174b
|
6580ba5d135c4f33f1a0996953ba2a65f7458a14
|
/applications/ji178/models/fdutilities0color.py
|
91fd463eb0f338b25751b6a4ed0885b19f611149
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
ali96343/facew2p
|
02b038d3853691264a49de3409de21c8a33544b8
|
a3881b149045e9caac344402c8fc4e62edadb42f
|
refs/heads/master
| 2021-06-10T17:52:22.200508
| 2021-05-10T23:11:30
| 2021-05-10T23:11:30
| 185,795,614
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,439
|
py
|
#
# table for controller: utilities_color
#
from gluon.contrib.populate import populate

# Key/value store backing the utilities-color demo page: f0 is the widget
# key, f1 its display string, f2 a saved copy (initially empty).
db.define_table('dutilities0color',
    Field('f0', label='key', writable = True , length= 1000),
    Field('f1', 'text', label='data string', length= 1000),
    Field('f2', 'text', label='save data string', length= 1000, default='' ),
)

# Seed data as (key, text) pairs; inserted once, only while the table is empty.
_seed_rows = (
    ('sp247', '(247)Dashboard'),
    ('sp248', '(248)Components'),
    ('hf249', '(249)Custom Components:'),
    ('aa251', '(251)Buttons'),
    ('aa253', '(253)Cards'),
    ('sp254', '(254)Utilities'),
    ('hf255', '(255)Custom Utilities:'),
    ('aa257', '(257)Colors'),
    ('aa259', '(259)Borders'),
    ('aa261', '(261)Animations'),
    ('aa263', '(263)Other'),
    ('sp264', '(264)Pages'),
    ('hf265', '(265)Login Screens:'),
    ('aa267', '(267)Login'),
    ('aa269', '(269)Register'),
    ('aa271', '(271)Forgot Password'),
    ('hf272', '(272)Other Pages:'),
    ('aa274', '(274)404 Page'),
    ('aa276', '(276)Blank Page'),
    ('sp278', '(278)Charts'),
    ('sp280', '(280)Tables'),
    ('pb281', '(281)Search for...'),
    ('pb282', '(282)Search for...'),
    ('sx283', '(283)3+'),
    ('di284', '(284)December 12, 2019'),
    ('sx285', '(285)A new monthly report is ready to download!'),
    ('di286', '(286)December 7, 2019'),
    ('di287', '(287)December 2, 2019'),
    ('aa288', '(288)Show All Alerts'),
    ('di289', '(289)Hi there! I am wondering if you can help me with a problem I ve been having.'),
    ('di290', '(290)Emily Fowler 58m'),
    ('di291', '(291)I have the photos that you ordered last month, how would you like them sent to you?'),
    ('di292', '(292)Jae Chun 1d'),
    ('di293', '(293)Last month s report looks great, I am very happy with the progress so far, keep up the good work!'),
    ('di294', '(294)Morgan Alvarez 2d'),
    ('di295', '(295)Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren t good...'),
    ('di296', '(296)Chicken the Dog 2w'),
    ('aa297', '(297)Read More Messages'),
    ('sx298', '(298)Valerie Luna'),
    ('ha299', '(299)Color Utilities'),
    ('pc300', '(300)page. The custom utilities below were created to extend this theme past the default utility classes built into Bootstrap s framework.'),
    ('aa301', '(301)Bootstrap Documentation'),
    ('hf302', '(302)Custom Text Color Utilities'),
    ('pc303', '(303).text-gray-100'),
    ('pc304', '(304).text-gray-200'),
    ('pc305', '(305).text-gray-300'),
    ('pc306', '(306).text-gray-400'),
    ('pc307', '(307).text-gray-500'),
    ('pc308', '(308).text-gray-600'),
    ('pc309', '(309).text-gray-700'),
    ('pc310', '(310).text-gray-800'),
    ('pc311', '(311).text-gray-900'),
    ('hf312', '(312)Custom Font Size Utilities'),
    ('pc313', '(313).text-xs'),
    ('pc314', '(314).text-lg'),
    ('hf315', '(315)Custom Background Gradient Utilities'),
    ('di316', '(316).bg-gradient-primary'),
    ('di317', '(317).bg-gradient-success'),
    ('di318', '(318).bg-gradient-info'),
    ('di319', '(319).bg-gradient-warning'),
    ('di320', '(320).bg-gradient-danger'),
    ('hf321', '(321)Custom Grayscale Background Utilities'),
    ('di322', '(322).bg-gray-100'),
    ('di323', '(323).bg-gray-200'),
    ('di324', '(324).bg-gray-300'),
    ('di325', '(325).bg-gray-400'),
    ('di326', '(326).bg-gray-500'),
    ('di327', '(327).bg-gray-600'),
    ('di328', '(328).bg-gray-700'),
    ('di329', '(329).bg-gray-800'),
    ('di330', '(330).bg-gray-900'),
    ('sp331', '(331)Copyright © Your Website 2019'),
    ('he332', '(332)Ready to Leave?'),
    ('sx333', '(333)'),
    ('di334', '(334)Select Logout below if you are ready to end your current session.'),
    ('bu335', '(335)Cancel'),
    ('aa337', '(337)Logout'),
)

if not db(db.dutilities0color.id ).count():
    for _key, _text in _seed_rows:
        db.dutilities0color.insert(f0=_key, f1=_text)
    db.commit()
#
|
[
"ab96343@gmail.com"
] |
ab96343@gmail.com
|
7805ee413e99196317a0afddff32012074eee45c
|
37d6493969b783755a64b182c588b05f32ff8964
|
/cadash/utils.py
|
f9d2b6c7b1eed9dafe16bca523c8ab54ac9cdbec
|
[
"Apache-2.0"
] |
permissive
|
harvard-dce/cadash
|
97943f726fe73d88ee8677eeb670f4f82ab5c8c5
|
10300ca4ce097d8a633612554d257b939633eeae
|
refs/heads/master
| 2020-04-04T21:42:34.816945
| 2017-07-26T18:31:23
| 2017-07-26T18:31:23
| 59,121,777
| 0
| 1
| null | 2016-09-16T15:58:13
| 2016-05-18T14:13:25
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,831
|
py
|
# -*- coding: utf-8 -*-
"""Helper utilities and decorators."""
import os
import logging
import logging.config
import platform
import re
import sys
import yaml
from flask import current_app
from flask import flash
from flask import redirect
from flask import request
from flask import url_for
from flask_login import current_user
from functools import wraps
import requests
from requests.auth import HTTPBasicAuth
from cadash import __version__
from cadash.user.models import BaseUser
def flash_errors(form, category='warning'):
    """Flash every validation error attached to *form*.

    :param form: WTForms-style form whose ``errors`` maps field names to
        lists of error strings
    :param category: flash category forwarded to Flask's ``flash``
    """
    for field_name, messages in form.errors.items():
        label = getattr(form, field_name).label.text
        for message in messages:
            flash('{0} - {1}'.format(label, message), category)
# from http://victorlin.me/posts/2012/08/26/good-logging-practice-in-python
def setup_logging(
        app,
        default_level=logging.INFO):
    """
    set up logging config.

    Loads a YAML dictConfig from the file named by app.config['LOG_CONFIG']
    when it exists; otherwise falls back to logging.basicConfig.

    :param: app: application obj; relevant app.config['LOG_CONFIG']
        which is the full path to the yaml file with configs for logs
    :param: default_level: log level for basic config, default=INFO
    """
    if os.path.exists(app.config['LOG_CONFIG']):
        with open(app.config['LOG_CONFIG'], 'rt') as f:
            # Fix: yaml.load() without an explicit Loader was deprecated in
            # PyYAML 5.1 and raises TypeError in 6.0+; safe_load parses the
            # plain-data logging config without executing arbitrary tags.
            config = yaml.safe_load(f.read())
        logging.config.dictConfig(config)
    else:
        logging.basicConfig(level=default_level)
def clean_name(name):
    """Normalize *name*: lowercase it and collapse each run of
    non-alphanumeric characters into a single underscore."""
    stripped = name.strip()
    return re.sub('[^0-9a-zA-Z]+', '_', stripped).lower()
def pull_data(url, creds=None):
    """
    get text file from `url`.

    reads a text file from given url
    if basic auth needed, pass args creds['user'] and creds['pwd']

    Returns the response body as text, or None when the request fails.
    """
    headers = {
        'User-Agent': default_useragent(),
        'Accept-Encoding': 'gzip, deflate',
        'Accept': 'text/html, text/*'
    }
    au = None
    if creds is not None:
        if 'user' in creds and 'pwd' in creds:
            au = HTTPBasicAuth(creds['user'], creds['pwd'])
            headers.update({'X-REQUESTED-AUTH': 'Basic'})
    try:
        response = requests.get(url, headers=headers, auth=au)
        # Fix: requests.get() never raises HTTPError on its own -- transport
        # failures raise subclasses of RequestException, and 4xx/5xx status
        # codes must be surfaced explicitly via raise_for_status().  The old
        # `except requests.HTTPError` therefore caught nothing in practice.
        response.raise_for_status()
    except requests.RequestException as e:
        logger = logging.getLogger(__name__)
        logger.warning('data from url(%s) is unavailable. Error: %s' % (url, e))
        return None
    else:
        return response.text
def default_useragent():
    """Return a string representing the default user agent."""
    impl = platform.python_implementation()
    if impl == 'PyPy':
        info = sys.pypy_version_info
        impl_version = '%s.%s.%s' % (info.major, info.minor, info.micro)
        if info.releaselevel != 'final':
            impl_version = ''.join([impl_version, info.releaselevel])
    elif impl in ('CPython', 'Jython', 'IronPython'):
        # For Jython/IronPython this is a best guess, as in the original.
        impl_version = platform.python_version()
    else:
        impl_version = 'Unknown'
    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'
    return ' '.join([
        '%s/%s' % (__name__, __version__),
        '%s/%s' % (impl, impl_version),
        '%s/%s' % (p_system, p_release)])
def fetch_ldap_user(usr, pwd, cli):
    """fetch user in ldap, and the groups user belongs to.

    returns a BaseUser object or None if not authenticated or unknown
    """
    if not cli.is_authenticated(usr, pwd):
        return None
    user = BaseUser(usr)
    user.place_in_groups(cli.fetch_groups(usr))
    return user
def is_authorized_by_groups(user, groups):
    """return True if `user` in any group of list `groups`."""
    return any(user.is_in_group(group) for group in groups)
def requires_roles(*roles):
    # Decorator factory: guards a Flask view, redirecting to the public home
    # page unless the current user belongs to one of the allowed groups.
    # NOTE(review): `roles` is re-unpacked below (`*roles`), so callers are
    # expected to pass a single iterable, e.g. @requires_roles(['admin']);
    # several bare string arguments would raise TypeError -- confirm intent
    # before changing this.
    def wrapper(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            # LOGIN_DISABLED (e.g. during tests) bypasses the check entirely.
            if not current_app.config.get('LOGIN_DISABLED'):
                if not is_authorized_by_groups(current_user, *roles):
                    flash('You need to login, or do not have credentials to access this page', 'info')
                    return redirect(url_for('public.home', next=request.url))
            return f(*args, **kwargs)
        return wrapped
    return wrapper
|
[
"nmaekawa@g.harvard.edu"
] |
nmaekawa@g.harvard.edu
|
4bb2be4d6fdbddbaf576d3a9d74969ce33b1a2ce
|
f44aa93f92f2ddfa0e3ed6595c0b77c3ab14dde1
|
/v7_Modular_events/runner.py
|
af0a0cf748808975286cabf01c5d5f0c0a51b50e
|
[] |
no_license
|
michaeljpitcher/Lung-Network-Model
|
5b7efd2f4522852e9be415aa3485419226f9fa8f
|
8f23dc1c2002bdabc0f8ec5bd5078d64628adcb5
|
refs/heads/master
| 2020-06-23T05:17:07.357279
| 2017-06-29T15:28:24
| 2017-06-29T15:28:24
| 74,665,064
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
__author__ = "Michael J. Pitcher"

from Models.TB.TBModel3 import TBModel3
from Models.TB.TBClasses import *
from Models.TB.TBEventProbabilityKeys import *

# Event probabilities for this run: only fast/slow bacterial replication is
# switched on; every other event is disabled.
params = {
    P_REPLICATION_BACTERIA_FAST: 0.1,
    P_REPLICATION_BACTERIA_SLOW: 0.01,
    P_TRANSLOCATE_BRONCHUS_BACTERIA_FAST: 0.0,
    P_TRANSLOCATE_BRONCHUS_BACTERIA_SLOW: 0.0,
    P_CHANGE_BACTERIA_FAST_TO_SLOW: 0.0,
    P_CHANGE_BACTERIA_SLOW_TO_FAST: 0.0,
    P_INGEST_AND_DESTROY_MACROPHAGE_FAST: 0.0,
    P_INGEST_AND_DESTROY_MACROPHAGE_SLOW: 0.0,
    P_RECRUITMENT_BPS_MACROPHAGE: 0.0,
    P_RECRUITMENT_LYMPH_MACROPHAGE: 0.0,
    P_DEATH_MACROPHAGE: 0.0,
}

model = TBModel3(params)

# Initial seeding by node: fast bacteria, slow bacteria, macrophages.
loads_f = {1: 10}
loads_s = {2: 5}
loads_m = {3: 10}
model.load(loads_f, loads_s, loads_m)

# Profile a 75-time-unit run and dump hot spots by cumulative time.
import cProfile
p = cProfile.Profile()
p.enable()
model.run(75)
p.disable()
p.print_stats('cumtime')

# model.display_network([BACTERIA_FAST, BACTERIA_SLOW, MACROPHAGE])
|
[
"mjp22@st-andrews.ac.uk"
] |
mjp22@st-andrews.ac.uk
|
a10f9c0aa132f6863b3adb1a46e00038ae728335
|
029948b3fd0e41d80d66c84d808abff4fcb24ac8
|
/dnac_api_client/models/claim_device_request_config_list.py
|
15bfc7d92efe92a13a449347e88c0422c99f1119
|
[] |
no_license
|
yijxiang/dnac-api-client
|
842d1da9e156820942656b8f34342d52c96d3c37
|
256d016e2df8fc1b3fdad6e28f441c6005b43b07
|
refs/heads/master
| 2021-09-25T21:10:09.502447
| 2018-10-25T14:39:57
| 2018-10-25T14:39:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,060
|
py
|
# coding: utf-8
"""
    Cisco DNA Center Platform v. 1.2.x (EFT)

    REST API (EFT)  # noqa: E501

    OpenAPI spec version: 1.0.0
    Generated by: https://openapi-generator.tech
"""
import pprint
import re  # noqa: F401
import six

class ClaimDeviceRequestConfigList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute types, used by to_dict() below.
    openapi_types = {
        'config_id': 'str',
        'config_parameters': 'list[ClaimDeviceRequestConfigParameters]'
    }

    # Python attribute name -> JSON key in the OpenAPI definition.
    attribute_map = {
        'config_id': 'configId',
        'config_parameters': 'configParameters'
    }

    def __init__(self, config_id=None, config_parameters=None):  # noqa: E501
        """ClaimDeviceRequestConfigList - a model defined in OpenAPI"""  # noqa: E501
        self._config_id = None
        self._config_parameters = None
        self.discriminator = None
        # Only assign attributes the caller actually supplied.
        if config_id is not None:
            self.config_id = config_id
        if config_parameters is not None:
            self.config_parameters = config_parameters

    @property
    def config_id(self):
        """Gets the config_id of this ClaimDeviceRequestConfigList.  # noqa: E501

        :return: The config_id of this ClaimDeviceRequestConfigList.  # noqa: E501
        :rtype: str
        """
        return self._config_id

    @config_id.setter
    def config_id(self, config_id):
        """Sets the config_id of this ClaimDeviceRequestConfigList.

        :param config_id: The config_id of this ClaimDeviceRequestConfigList.  # noqa: E501
        :type: str
        """
        self._config_id = config_id

    @property
    def config_parameters(self):
        """Gets the config_parameters of this ClaimDeviceRequestConfigList.  # noqa: E501

        :return: The config_parameters of this ClaimDeviceRequestConfigList.  # noqa: E501
        :rtype: list[ClaimDeviceRequestConfigParameters]
        """
        return self._config_parameters

    @config_parameters.setter
    def config_parameters(self, config_parameters):
        """Sets the config_parameters of this ClaimDeviceRequestConfigList.

        :param config_parameters: The config_parameters of this ClaimDeviceRequestConfigList.  # noqa: E501
        :type: list[ClaimDeviceRequestConfigParameters]
        """
        self._config_parameters = config_parameters

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists of models and dicts of
        # models via their own to_dict(); plain values pass through unchanged.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ClaimDeviceRequestConfigList):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"cunningr@cisco.com"
] |
cunningr@cisco.com
|
b906a8ee56c5bdb20867a9f5ededa1379458b25e
|
1e3461947b86538c384d2faaab9a505912a151fb
|
/color_transformations.py
|
6fa7f8fdaaf8168fd16401c26c13470ae5408884
|
[
"MIT"
] |
permissive
|
maxalbert/colormap-selector
|
c20a53ec36f90637aef11620434cc7811e49cc97
|
43ec6e70058e4b75496def3e49471c76c8684ef3
|
refs/heads/master
| 2021-01-01T05:50:17.044061
| 2015-01-09T00:08:47
| 2015-01-09T00:08:47
| 28,750,875
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,222
|
py
|
import numpy as np
import matplotlib.colors as mcolors
# CIE XYZ coordinates of the D65 reference white point (scaled so Y = 1).
whitepoint_D65 = np.array([0.9642, 1, 0.8249])

# XYZ -> linear RGB conversion matrix; its inverse handles RGB -> XYZ.
A_xyz2rgb = np.array(
    [[3.240479, -1.537150, -0.498535],
     [-0.969256, 1.875992, 0.041556 ],
     [0.055648, -0.204043, 1.057311 ]])
A_rgb2xyz = np.linalg.inv(A_xyz2rgb)
class RGBRangeError(Exception):
    """Raised when a converted color falls outside the valid RGB range [0, 1]."""
    pass
def f(t):
    """Forward companding function used by the XYZ -> CIELAB conversion."""
    threshold = (6. / 29) ** 3
    if t <= threshold:
        # Linear segment near zero.
        return 1. / 3 * (29. / 6) ** 2 * t + 4. / 29
    return t ** (1. / 3)
def f_inv(t):
    """Inverse of f(): maps companded values back to linear ones."""
    if t <= 6. / 29:
        # Linear segment near zero.
        return 3 * (6. / 29) ** 2 * (t - 4. / 29)
    return t ** 3
def xyz2lab(xyz, whitepoint=whitepoint_D65):
    """
    Convert XYZ color coordinates to CIELAB.

    *Arguments*

    xyz: 3-tuple (or other list-like)

    *Returns*

    numpy array [L, a, b].
    """
    fx, fy, fz = (f(c / w) for c, w in zip(xyz, whitepoint))
    L = 116. * fy - 16
    a = 500. * (fx - fy)
    b = 200. * (fy - fz)
    return np.array([L, a, b], dtype=float)
def lab2xyz(lab, whitepoint=whitepoint_D65):
    """Convert CIELAB coordinates to XYZ; inverse of xyz2lab()."""
    L, a, b = lab
    Xw, Yw, Zw = whitepoint
    fy = 1. / 116 * (L + 16)
    X = Xw * f_inv(fy + 0.002 * a)
    Y = Yw * f_inv(fy)
    Z = Zw * f_inv(fy - 0.005 * b)
    return X, Y, Z
def rgb2xyz(rgb):
    """Convert linear RGB to XYZ via the A_rgb2xyz matrix."""
    return A_rgb2xyz.dot(np.asarray(rgb))
def xyz2rgb(xyz, assert_valid=False, clip=False):
    """Convert XYZ to linear RGB.

    With assert_valid=True, raise RGBRangeError if any channel leaves
    [0, 1]; with clip=True, clamp the result into [0, 1] afterwards.
    """
    rgb = A_xyz2rgb.dot(np.asarray(xyz))
    if assert_valid and (np.any(rgb < 0.0) or np.any(rgb > 1.0)):
        raise RGBRangeError()
    return np.clip(rgb, 0., 1.) if clip else rgb
def rgb2lab(rgb, whitepoint=whitepoint_D65):
    """Convert linear RGB to CIELAB (by going through XYZ)."""
    xyz = rgb2xyz(rgb)
    return xyz2lab(xyz, whitepoint=whitepoint)
def lab2rgb(lab, whitepoint=whitepoint_D65, assert_valid=False, clip=False):
    """Convert CIELAB to linear RGB (via XYZ); see xyz2rgb for the flags."""
    xyz = lab2xyz(lab, whitepoint=whitepoint)
    return xyz2rgb(xyz, assert_valid=assert_valid, clip=clip)
def rgb2rgba(rgb):
    """Append an opaque alpha channel (1.0) to an RGB triple."""
    rgba = np.empty(4)
    rgba[:3] = rgb
    rgba[3] = 1.
    return rgba
def lab2rgba(lab, whitepoint=whitepoint_D65, assert_valid=False, clip=False):
    """Convert CIELAB to an RGBA 4-vector with alpha fixed at 1."""
    rgb = lab2rgb(lab, whitepoint=whitepoint, assert_valid=assert_valid, clip=clip)
    return rgb2rgba(rgb)
def linear_colormap(pt1, pt2, coordspace='RGB'):
"""
Define a perceptually linear colormap defined through a line in the
CIELab [1] color space. The line is defined by its endpoints `pt1`,
`pt2`. The argument `coordspace` can be either `RGB` (the default)
or `lab` and specifies whether the coordinates of `pt1`, `pt2` are
given in RGB or Lab coordinates.
[1] http://dba.med.sc.edu/price/irf/Adobe_tg/models/cielab.html
"""
if coordspace == 'RGB':
pt1 = np.array(rgb2lab(pt1))
pt2 = np.array(rgb2lab(pt2))
elif coordspace == 'Lab':
pt1 = np.array(pt1)
pt2 = np.array(pt2)
else:
raise ValueError("Argument 'coordspace' must be either 'RGB' "
"or 'Lab'. Got: {}".format(coordspace))
tvals = np.linspace(0, 1, 256)
path_vals = np.array([(1-t) * pt1 + t * pt2 for t in tvals])
cmap_vals = np.array([lab2rgb(pt) for pt in path_vals])
#print np.where(cmap_vals < 0)
cmap = mcolors.ListedColormap(cmap_vals)
return cmap
|
[
"maximilian.albert@gmail.com"
] |
maximilian.albert@gmail.com
|
65ff0d2791ba3bee69abbe3799d327244d350005
|
0b2cc875b84e1b43daa7e0ccabe864ec69278ab0
|
/flarestack/cosmo/rates/frb_rates.py
|
2d28e7f08dbd287c963a84bddf5dfcccf0f9042b
|
[
"MIT"
] |
permissive
|
icecube/flarestack
|
4ce9f165d8a0dd1b44e8ec2185f216c92fc27d11
|
4d02244e3b92744a08b3c09009cc9aa3ea5e7931
|
refs/heads/master
| 2023-08-16T16:05:18.492306
| 2023-08-11T23:38:55
| 2023-08-11T23:38:55
| 127,512,114
| 9
| 4
|
MIT
| 2023-09-13T00:17:44
| 2018-03-31T08:03:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,479
|
py
|
import logging
from astropy import units as u
from flarestack.cosmo.rates.sfr_rates import get_sfr_evolution
local_frb_rates = {
"bochenek_20": (
7.23 * 10**7.0 / (u.Gpc**3 * u.yr),
(7.23 - 6.13) * 10**7.0 / (u.Gpc**3 * u.yr),
(7.23 + 8.78) * 10**7.0 / (u.Gpc**3 * u.yr),
"https://arxiv.org/abs/2005.10828",
),
}
def get_local_frb_rate(rate_name=None, with_range=False):
"""Returns a local rate of Fast Radio Bursts (FBBs).
:param rate_name: Name of local FRB rate to be used
:param with_range: Boolean to return +/- one sigma range functions alongside central rate
:return: Local rate
"""
if rate_name is None:
logging.info("No rate specified. Assuming default rate.")
rate_name = "bochenek_20"
if rate_name not in local_frb_rates.keys():
raise Exception(
f"Rate name '{rate_name}' not recognised. "
f"The following rates are available: {local_frb_rates.keys()}"
)
else:
local_rate, lower_lim, upper_lim, ref = local_frb_rates[rate_name]
logging.info(f"Loaded rate '{rate_name}' ({ref})")
if with_range:
if lower_lim is None:
raise Exception(
f"No one sigma rate range found for rate '{rate_name}'. "
f"Use a different rate, or set 'with_range=False'."
)
return (
local_rate.to("Mpc-3 yr-1"),
lower_lim.to("Mpc-3 yr-1"),
upper_lim.to("Mpc-3 yr-1"),
)
else:
return local_rate.to("Mpc-3 yr-1")
def get_frb_rate(evolution_name=None, rate_name=None, with_range=False, **kwargs):
"""Returns a local rate of core-collapse supernovae (CCSNe) as a function of redshift.
:param evolution_name: Name of Star Formation evolution to use
:param rate_name: Name of local FRB rate to be used
:param with_range: Boolean to return +/- one sigma range functions alongside central rate
:return: Rate as a function of redshift
"""
normed_evolution = get_sfr_evolution(evolution_name=evolution_name, **kwargs)
local_rate = get_local_frb_rate(rate_name=rate_name, with_range=with_range)
if with_range:
return (
lambda z: local_rate[0] * normed_evolution(z),
lambda z: local_rate[1] * normed_evolution(z),
lambda z: local_rate[2] * normed_evolution(z),
)
else:
return lambda z: local_rate * normed_evolution(z)
|
[
"robert.stein@desy.de"
] |
robert.stein@desy.de
|
072fcea6ad0ec0fa0e0effea0d1e577a5e7bed48
|
4d99350a527a88110b7bdc7d6766fc32cf66f211
|
/OpenGLCffi/GLES1/EXT/OES/EGL_image.py
|
c7ba140322b6d986cfd1d3a484abbc478fc84042
|
[
"MIT"
] |
permissive
|
cydenix/OpenGLCffi
|
e790ef67c2f6c9877badd5c38b7d58961c8739cd
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
refs/heads/master
| 2021-01-11T07:31:10.591188
| 2017-04-17T11:04:55
| 2017-04-17T11:04:55
| 80,312,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from OpenGLCffi.GLES1 import params
@params(api='gles1', prms=['target', 'image'])
def glEGLImageTargetTexture2DOES(target, image):
pass
@params(api='gles1', prms=['target', 'image'])
def glEGLImageTargetRenderbufferStorageOES(target, image):
pass
|
[
"cdenizol@gmail.com"
] |
cdenizol@gmail.com
|
21a3e92d50d50f8afb88ed7e4b8b1700ab8212da
|
0178e6a705ee8aa6bb0b0a8512bf5184a9d00ded
|
/Sungjin/String/5525.py
|
63ad63a387c4109d8b3d5474c7e89a5353e0b5c5
|
[] |
no_license
|
comojin1994/Algorithm_Study
|
0379d513abf30e3f55d6a013e90329bfdfa5adcc
|
965c97a9b858565c68ac029f852a1c2218369e0b
|
refs/heads/master
| 2021-08-08T14:55:15.220412
| 2021-07-06T11:54:33
| 2021-07-06T11:54:33
| 206,978,984
| 0
| 1
| null | 2020-05-14T14:06:46
| 2019-09-07T14:23:31
|
Python
|
UTF-8
|
Python
| false
| false
| 746
|
py
|
import sys
input = sys.stdin.readline
if __name__ == '__main__':
N = int(input())
M = int(input())
S = input().strip()
P = 'I' + 'OI'*N
start = False
check = 0
cnt, result = 0, 0
for i, s in enumerate(S):
if start:
if s == 'O' and check == 1: check += 1
elif s == 'I' and check == 2: cnt += 1
else:
if cnt >= N: result += cnt - N + 1
start = False
check, cnt = 0, 0
if s == 'I': start = True; check = 1
if cnt >= N: result += cnt - N + 1
print(result)
'''
2
25
OOIOIIOIOIOIOIOIOIOIOOIOI
6
4
100
IIOIOIOIOIOIOOOOIOIOIOIOOIIOIOIOIOIOIOIIOIOIOIOOOIIOIOIOIOIOIOOOOIOIOIOIOIOIOOIIIIOIOIOIOIOIIOIOIOIO
11
'''
|
[
"comojin1994@gmail.com"
] |
comojin1994@gmail.com
|
514e0990111b61362819faa91608463b1ba507e2
|
e1aac65877d20e8f93e63c8edf6dedd5137b562b
|
/testPydev/Integer_Roman.py
|
c3137a45187226f1c865ea1527b30aa13a179744
|
[] |
no_license
|
weezer/fun_scripts
|
67ccb6cc107c0bdd1b347d9cb1c0bdf934e1a331
|
38b8bd25d6db79044712779f95b26bf09ca0072a
|
refs/heads/master
| 2020-12-25T19:26:21.685571
| 2018-09-20T07:56:25
| 2018-09-20T07:56:25
| 18,510,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
class Solution(object):
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
roman_dict = {1: 'I', 4: 'IV', 5: 'V', 9: 'IX', 10: 'X', 40: 'XL',
50: 'L', 90: 'LC', 100: 'C', 400: 'CD', 500: 'D', 900: 'CM', 1000: 'M'}
r_lst = []
digit = 1
while num > 0:
a_lst = []
reminder = num % 10
reminder *= digit
digit *= 10
num /= 10
if roman_dict.get(reminder) is not None:
r_lst.insert(0, roman_dict.get(reminder))
else:
for minus_num in sorted(roman_dict, reverse = True):
if reminder == 0:
break
if minus_num > reminder:
continue
else:
while reminder >= minus_num:
a_lst.append(roman_dict[minus_num])
reminder -= minus_num
r_lst.insert(0, ''.join(a_lst))
return ''.join(r_lst)
if __name__ == "__main__":
s = Solution()
print s.intToRoman(3999)
|
[
"weezer.su@gmail.com"
] |
weezer.su@gmail.com
|
0d51c3a3f4fe0075d723424ee07d4b36454dd438
|
6bfe6e6c24eeb281a266a2d3fdaac645e79a4a85
|
/admin/web/login_controller.py
|
9d8a11e94f35b293ab2b7fdd2183bdc7d0fc23cc
|
[] |
no_license
|
itsumura-h/masonite_admin_install
|
606b5fe5f7043919fd955a48485e14d063116837
|
a8448eacdee0b4088b84ac2cac6d0fdec46dbe87
|
refs/heads/master
| 2020-05-31T01:17:21.094154
| 2019-06-30T07:11:13
| 2019-06-30T07:11:13
| 190,047,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
import os, pickle, secrets
from datetime import datetime
from bcrypt import checkpw
from masonite.auth import Sign
from masonite.controllers import Controller
from masonite.request import Request
from config.auth import AUTH
from config.admin import LOGIN_CONF
from admin.web.LoginToken import LoginToken
# try:
# from app.models.LoginToken import LoginToken
# except:
# from app.LoginToken import LoginToken
class LoginController(Controller):
def store(self, request: Request):
email = request.input('email')
password = request.input('password')
user = AUTH['model'].where('email', email).first()
login = checkpw(bytes(password, 'utf-8'), bytes(user.password, 'utf-8'))
if login:
hash = LoginToken().login(int(user.id))
return {'login': True, 'token': hash, 'id': user.id, 'name': user.name}
# Delete existing token
# LoginToken.where('admin_user_id', user.id).delete()
# hash = secrets.token_urlsafe()
# login_token = LoginToken()
# login_token.admin_user_id = user.id
# login_token.token = hash
# login_token.save()
# return {'login': True, 'token': hash, 'id': user.id, 'name': user.name}
else:
return {'login': False}
def destroy(self, request: Request):
login_id = request.input('login_id')
# LoginToken.where('admin_user_id', login_id).delete()
LoginToken().logout(int(login_id))
return {}
|
[
"dumblepy@gmail.com"
] |
dumblepy@gmail.com
|
25b3e881e120ecfab45a93c2d78f861cb64a3cc0
|
be1d3bbe87e42b3cc41238697129fc701380c43c
|
/web_speller/backend/web_speller/blinks/migrations/0001_initial.py
|
c6eae07327640598d0f11b266afc01ff1b717958
|
[] |
no_license
|
Borda/BCI-speller
|
051a6bd2a03f9be3e795f628decea39166946cbb
|
bf80f6c99f4258448199040d81d21f10ba1fd09d
|
refs/heads/master
| 2020-03-21T09:31:28.683073
| 2018-06-25T19:39:00
| 2018-06-25T19:39:00
| 138,404,105
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
# Generated by Django 2.0.5 on 2018-06-23 20:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BCIDevice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('device_id', models.CharField(max_length=50, unique=True)),
],
),
]
|
[
"skarzynski_lukasz@protonmail.com"
] |
skarzynski_lukasz@protonmail.com
|
b594bc5ed8b10f098540111dac731858955d269e
|
1d5b2b72d322dd154a8efb547290ad5abb1fd098
|
/work_dir/py_rpc/test1/client.py
|
cd739de1ff7000803d3cdcf24557c405ba6060ac
|
[] |
no_license
|
hxzwd/drafts
|
6b593b50cae309c02495a8aff28719f7b636962d
|
478f4a4c399ab0c7c3f8f6e22d13131488716e4d
|
refs/heads/master
| 2020-04-28T01:42:58.998610
| 2019-05-05T17:49:48
| 2019-05-05T17:49:48
| 174,868,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
import rpyc
host_addr = "localhost"
port = 18812
c = rpyc.connect(host_addr, port)
c.root.func_test("TEST MESSAGE! HELLO")
|
[
"="
] |
=
|
929ef551e4a4ed39cde8a606b1b2829d341e61c9
|
942f0b081d2271978ffe20fbbfa8d687b57e5c02
|
/daily_coding_challenges/challenges/mapping_of_digits.py
|
53d0d8d458ffa10f31cc7f656983e1ff6e8da511
|
[] |
no_license
|
simtb/coding-puzzles
|
99762322606bb505d82924d4d5843db1c04aafbd
|
9e1d53e35b2117240eb357d7930cdb8cfd891c8e
|
refs/heads/master
| 2021-04-12T15:46:40.181048
| 2021-02-28T23:47:36
| 2021-02-28T23:47:36
| 249,089,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
"""
This problem was asked by Yelp.
Given a mapping of digits to letters (as in a phone number), and a digit string, return all possible letters the number could represent. You can assume each valid number in the mapping is a single digit.
For example if {“2”: [“a”, “b”, “c”], 3: [“d”, “e”, “f”], …} then “23” should return [“ad”, “ae”, “af”, “bd”, “be”, “bf”, “cd”, “ce”, “cf"].
"""
from typing import List
def solution(map_of_digits: dict, digit: str) -> List[str]:
pass
|
[
"simeon@Sims-MacBook-Pro.local"
] |
simeon@Sims-MacBook-Pro.local
|
63f8277e6c153482a0737d7d7fc2be4065b35437
|
4926667354fa1f5c8a93336c4d6e2b9f6630836e
|
/1318.py
|
39050eeca9d4be13c770e32b2ee0404761fb2afa
|
[] |
no_license
|
nascarsayan/lintcode
|
343b3f6e7071479f0299dd1dd1d8068cbd7a7d9e
|
4da24b9f5f182964a1bdf4beaa8afc17eb7a70f4
|
refs/heads/master
| 2021-07-13T12:31:45.883179
| 2020-07-20T02:27:53
| 2020-07-20T02:27:53
| 185,825,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
py
|
class Solution:
"""
@param nums: the given array
@param k: the given k
@param t: the given t
@return: whether there are two distinct indices i and j in the array such that the absolute difference between nums[i] and nums[j]
is at most t and the absolute difference between i and j is at most k.
"""
def containsNearbyAlmostDuplicate(self, nums, k, t):
# Write your code here
size = len(nums)
bucket, wt = {}, t + 1
for i in range(size):
bi = nums[i] // wt
for bj in range(bi - 1, bi + 2):
if bj in bucket and abs(bucket[bj] - nums[i]) <= t:
return True
bucket[bi] = nums[i]
if i >= k:
del bucket[nums[i - k] // wt]
return False
|
[
"nascarsayan@iitkgp.ac.in"
] |
nascarsayan@iitkgp.ac.in
|
ded03050bd48a80ff92b49a40d0a4295a2803b38
|
bb2add9ca1fdd3e33b2c28d494d497b62af121ae
|
/setup.py
|
e2be87663ddf37084b2e742b0b56e5633964c225
|
[
"MIT"
] |
permissive
|
wegamekinglc/simpleutils
|
4a48b0eae783ad74e3c16454ea04fff88a6b7796
|
1e3c89f72347b94538267c6f66f87c2cabdb18dd
|
refs/heads/master
| 2021-07-26T01:09:27.582528
| 2020-03-17T02:19:28
| 2020-03-17T02:19:28
| 90,547,150
| 0
| 1
| null | 2017-07-22T19:21:26
| 2017-05-07T16:44:07
|
Python
|
UTF-8
|
Python
| false
| false
| 357
|
py
|
import io
from setuptools import setup
from setuptools import find_packages
setup(
name='simpleutils',
version='0.2.5',
packages=find_packages(),
url='',
license='MIT',
author='wegamekinglc',
author_email='wegamekinglc@hotmail.com',
install_requires=io.open("requirements.txt", encoding='utf8').read(),
description=''
)
|
[
"scrappedprince.li@gmail.com"
] |
scrappedprince.li@gmail.com
|
4c981b7947331d05e0ffc54bf6c0a5898cc085f0
|
7c2e677d931a8eb7d7cffc6d54713411abbe83e4
|
/AppBuilder9000/AppBuilder9000/ZPYLP0914/HikingApp/migrations/0003_auto_20200925_1746.py
|
b15a2782dcbe732ed55cad1047fa3495f381341d
|
[] |
no_license
|
r3bunker/Python_Live_Project
|
19e367b3cf74c2279c287fcd3a8a44a27f24041a
|
d3e06150d7daea6326cc1a4155309d99e4ff6244
|
refs/heads/main
| 2023-06-12T23:01:50.440371
| 2021-06-16T20:21:03
| 2021-06-16T20:21:03
| 344,883,966
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# Generated by Django 2.2.5 on 2020-09-26 00:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('HikingApp', '0002_auto_20200923_2326'),
]
operations = [
migrations.AlterField(
model_name='hike_preferences',
name='nick_name',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='HikingApp.create_happ_user'),
),
]
|
[
"r3bunker@gmail.com"
] |
r3bunker@gmail.com
|
09fce38313a60fd7a92b0eb090a3706cabaa6808
|
62b7c6baaef93a603ecc4eb0c6a140e7b133b577
|
/data/modules/exploit/macos/stager/membrane_reverse_tcp/core/handler.py
|
e25ac93798adf8402e51e6d675d78b3a9bdf9870
|
[
"MIT"
] |
permissive
|
sashka3076/ZetaSploit
|
eca5badfcb796c95aa2c8036b3814d5687a2f7b8
|
f5de0c10fb811f4b6f358412f6d848a7da8080fd
|
refs/heads/main
| 2023-03-03T04:43:06.137871
| 2021-02-05T18:37:18
| 2021-02-05T18:37:18
| 336,480,949
| 1
| 0
|
MIT
| 2021-02-06T07:27:20
| 2021-02-06T07:27:20
| null |
UTF-8
|
Python
| false
| false
| 2,767
|
py
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from data.modules.exploit.macos.stager.membrane_reverse_tcp.core.terminator import terminator
class handler:
def __init__(self, client):
self.client = client
self.terminator = terminator()
def sendterm(self):
terminator = self.terminator.generate_terminator()
self.client.send((terminator + '\x04').encode())
return terminator
def send(self, buffer):
if not isinstance(buffer, bytes):
buffer = buffer.encode()
self.client.send(buffer + '\x04'.encode())
def sendall(self, buffer):
terminator = self.sendterm()
if not isinstance(buffer, bytes):
buffer = buffer.encode()
self.client.send(buffer + '\x04'.encode())
return terminator
def recvstr(self, char='\n'):
result = self.recvall(char)
return result
def recvall(self, terminator):
result = b''
while 1:
data = self.client.recv(1024)
if terminator.encode() in data:
data = data.replace(terminator.encode(), b'')
result += data
break
else:
result += data
return result
def recvfile(self, terminator, input_file):
output_file = open(input_file, "wb")
while 1:
data = self.client.recv(1024)
if terminator.encode() in data:
data = data.replace(terminator.encode(), b'')
output_file.write(data)
break
else:
output_file.write(data)
output_file.close()
|
[
"enty8080@gmail.com"
] |
enty8080@gmail.com
|
4a36aec0bb29c9ee7b1376c66f7e37711c95c169
|
a71fbf421c43fcb34fe7c8000eb807677821683c
|
/keras/keras06_RMSE.py
|
7d109b56293501423f78ec268a42c551f20160cf
|
[] |
no_license
|
leekyunghun/bit_seoul
|
ccd96dca3774f259e04b8388e134d6183b974268
|
b76a3d5f83b77f5345d61cf3baa68aaefc25cd2a
|
refs/heads/master
| 2023-02-06T08:12:17.768076
| 2020-12-22T13:35:19
| 2020-12-22T13:35:19
| 311,286,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,854
|
py
|
import numpy as np
#1. 데이터 # 선형회귀 예제
x_train = np.array([1,2,3,4,5,6,7,8,9,10])
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_test = np.array([11,12,13,14,15])
y_test = np.array([11,12,13,14,15])
x_pred = np.array([16,17,18])
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
#2.모델 구성
model = Sequential() # model이 Sequential이라고 선언
model.add(Dense(32, input_dim = 1)) # input_dim = 1 => input이 1차원 #model.add(Dense(32, input_dim = 1)) 값이 잘 나옴
model.add(Dense(16)) #model.add(Dense(16))
model.add(Dense(8)) #model.add(Dense(8))
model.add(Dense(1)) #model.add(Dense(1))
#3.컴파일, 훈련
model.compile(loss='mse', optimizer='adam', metrics=['mae']) # metrics = 평가방식 (훈련중에 연산한 내용을 보여주는역할)
model.fit(x_train, y_train, epochs = 100, batch_size = 1) # model.fit => 모델을 훈련시킴
# model.fit(x_train, y_train, epochs = 100) # model.fit => 모델을 훈련시킴
#4.평가, 예측
# loss, acc = model.evaluate(x_test, y_test, batch_size = 1)
loss = model.evaluate(x_test, y_test) # evaluate의 디폴트는 loss값 metrics에 추가한 값들이 evaluate 출력에 포함
print("loss : ", loss)
# print("acc : ", acc)
y_predict = model.predict(x_test) # 예측값 확인
print("결과물: \n :", y_predict)
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test, y_predict))
print("RMSE: ", RMSE(y_test, y_predict))
|
[
"oh_pizza@naver.com"
] |
oh_pizza@naver.com
|
3f8c227967ee0fd61a7c3e634b1b36f2412658e8
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/arc013/A/4657608.py
|
a1c4dd03b11eae7bc30d1cf841829c266962862f
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
N, M, L = map(int, input().split())
P, Q, R = map(int, input().split())
Res = max((N//P)*(M//Q)*(L//R), (N//Q)*(M//R)*(L//P), (N//R)*(M//P)*(L//Q))
Res2 = max((N//Q)*(M//P)*(L//R),(N//R)*(M//Q)*(L//P), (N//P)*(M//R)*(L//Q))
print(max(Res, Res2))
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
3e50ccd03eaa5a52efbe94b7be2d3d9b816f48b6
|
7889f7f0532db6a7f81e6f8630e399c90438b2b9
|
/3.4.3/_downloads/83afb11a0261474b783405dd2737c8b4/marker_path.py
|
7a3894c1b84d42fa562aa9292d2baacac6e2009b
|
[] |
no_license
|
matplotlib/matplotlib.github.com
|
ef5d23a5bf77cb5af675f1a8273d641e410b2560
|
2a60d39490941a524e5385670d488c86083a032c
|
refs/heads/main
| 2023-08-16T18:46:58.934777
| 2023-08-10T05:07:57
| 2023-08-10T05:08:30
| 1,385,150
| 25
| 59
| null | 2023-08-30T15:59:50
| 2011-02-19T03:27:35
| null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
"""
===========
Marker Path
===========
Using a `~.path.Path` as marker for a `~.axes.Axes.plot`.
"""
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import numpy as np
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the circle with an internal cutout of the star
verts = np.concatenate([circle.vertices, star.vertices[::-1, ...]])
codes = np.concatenate([circle.codes, star.codes])
cut_star = mpath.Path(verts, codes)
plt.plot(np.arange(10)**2, '--r', marker=cut_star, markersize=15)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.path`
# - `matplotlib.path.Path`
# - `matplotlib.path.Path.unit_regular_star`
# - `matplotlib.path.Path.unit_circle`
# - `matplotlib.axes.Axes.plot` / `matplotlib.pyplot.plot`
|
[
"quantum.analyst@gmail.com"
] |
quantum.analyst@gmail.com
|
1c7c39467f29261840f0dd971bee8ef850b22534
|
b5eaeded2af4417603d6592f29c81c2426397153
|
/catatan/migrations/0002_auto_20201111_0927.py
|
cb4367a8a60122829cf0c2231a5d667dc6c68e2d
|
[] |
no_license
|
giko99/sim-labsos
|
9871059115be511882c96be10b6baaa9874a86ae
|
1711607aa3cb087c1ec5efc2b337841eb0a1f33d
|
refs/heads/main
| 2023-01-10T06:38:17.209459
| 2020-11-12T10:04:40
| 2020-11-12T10:04:40
| 311,870,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
# Generated by Django 2.2 on 2020-11-11 02:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catatan', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='catatan',
old_name='selang',
new_name='waktu_kegiatan',
),
]
|
[
"giko@students.unnes.ac.id"
] |
giko@students.unnes.ac.id
|
c888771fa263c8e74bf9f393fcdbb8476f204704
|
36add5afc63ec09d63b8a877c29c17391938ee5c
|
/.history/process_tweet_20201113145341.py
|
c00da9bce7d529f27130fc4cb4ab2841dd6d41c6
|
[] |
no_license
|
E-STAT/sentiment_api
|
e84eb04a9f21c7368ca20bdb97436ffea9f65f25
|
bd9ee0d78d9eac8b6448b96c2560611a64f7b79d
|
refs/heads/master
| 2023-01-12T13:06:14.654883
| 2020-11-20T11:30:22
| 2020-11-20T11:30:22
| 314,534,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,533
|
py
|
import re
REPLACE_BY_SPACE = re.compile('[/(){}\[\]\|,;&-_]') #punctuation to replace
def preprocess_text(text):
"""
Function to preprocess text: removes links, punctuation, spaces, non-alpha words and stop_words
Parameters
----------
text: str
a string to be preprocessed
Returns
-------
text: str
a preprocessed string
"""
self.text = text.lower() #lowercase
self.text = re.sub(r"http\S+", "", text) #replace links with ""
self.text = re.sub(r"\@\S+", "", text) #replace mentions with ""
self.text = re.sub(r"#\S+", "", text) #replace hashtags with ""
self.text = re.sub(r"won\'t", "would not", text) #deal with contractions
self.text = re.sub(r"n\'t", " not", text) #deal with contractions
self.text = REPLACE_BY_SPACE.sub(' ', text) #replace punctuation with space
self.text = [word.strip() for word in text.split()] #strip space from words
self.text = [word for word in text if len(word)>2] #removing words less than 2 characters
self.text = [word for word in text if word!='amp'] #removing twitter amp
self.text = ' '.join(text)
return self.text
test = preprocess_text("Hello, this is Ernest @OwojoriErnest. #EndSars")
print(test)
|
[
"owojori.tolulope@gmail.com"
] |
owojori.tolulope@gmail.com
|
b70eb58802e5dbe8a525a194f8c647b1713059b2
|
f98f4aaeca3ac841905e0cd8547bbf41944fe690
|
/编程语言/Python/Python编程从入门到实践/第一部分_基础知识/第11章_测试代码/11_1/test_cities.py
|
8a129dc96c26bbe02c29536932c7670c18d25732
|
[] |
no_license
|
zhb339/book-learning
|
64f433b1ee1f66f3120828352df3b533be4cf9de
|
5273fc8d11b2d602484dbe95e55f1e931858382f
|
refs/heads/master
| 2020-03-29T10:48:22.771146
| 2018-11-07T13:46:53
| 2018-11-07T13:46:53
| 149,823,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
import unittest
from city_functions import get_location
class CityTest(unittest.TestCase):
def test_city_country(self):
location = get_location("hangzhou", "china")
self.assertEqual(location, "hangzhou, china")
unittest.main()
|
[
"551788157@qq.com"
] |
551788157@qq.com
|
06c77303e95c817d8ebca414a1acfcf467f2abd7
|
2c6f77b281ee9c901a788b5617f26e73a3732762
|
/chapter7/insertionSort.py
|
130a3f6cf1f401844d03745fea0c27e898c779cd
|
[] |
no_license
|
Boberkraft/Data-Structures-and-Algorithms-in-Python
|
5db8ff814f4e954aca6701fabcc70900fe7012ff
|
60a5ef54f9cffb4dcdd3fc494cfeeb662d570ae9
|
refs/heads/master
| 2021-01-20T06:05:29.552453
| 2018-10-28T19:06:55
| 2018-10-28T19:06:55
| 89,841,233
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
from PositinalList import PositionalList
def insertion_sort(l):
if len(l) > 1:
marker = L.first()
while marker != l.last():
pivot = l.after(marker)
value = pivot.element()
if value > marker.element():
marker = pivot
else:
walk = marker
while walk != l.first() and l.before(walk).element() > value:
walk = l.before(walk)
l.delete(pivot)
l.add_before(walk, value)
|
[
"andrzej.bisewski@gmail.com"
] |
andrzej.bisewski@gmail.com
|
d71b31be0ea0908049b6484535473a11033bc7ca
|
c11123ce1e86f8306dcc3bf5d017dbfa8bb1d515
|
/Easy/Pascal's Triangle.py
|
2d0b01d3d2e9680b1cc10cbe895915d5b9fe90ab
|
[] |
no_license
|
uathena1991/Leetcode
|
7e606c68a51ed09e6e6a9fad327b24066e92d0c4
|
e807ae43a0a253deaa6c9ed1c592fa3a14a6cab8
|
refs/heads/master
| 2021-05-01T15:21:25.568729
| 2019-10-13T14:33:30
| 2019-10-13T14:33:30
| 74,910,747
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1]]
if numRows == 0:
return []
if numRows == 1:
return [[1]]
for n in range(1,numRows):
res.append([1]*(n+1))
for j in range(1,n):
res[n][j] = res[n-1][j] + res[n-1][j-1]
return res
a = Solution()
print a.generate(7)
|
[
"xiaoli.he@rutgers.edu"
] |
xiaoli.he@rutgers.edu
|
5ae8b4f10739b6e6002264dbdfb90ab5fb784055
|
5d8bbb53d3ca8d532a1d977906a0fdf2aed6d46f
|
/ecoke/tests/test_views.py
|
e5c5f9a24e15e40c94f1b298ae200f8f7687d2b1
|
[] |
no_license
|
ErickMwazonga/e-Coke
|
da113d5cf2127b9947622008433057331c6a87f9
|
d72f6f2d5f6b5bb8d1a50c2fa6d22cf9a462874d
|
refs/heads/master
| 2021-01-20T07:16:00.763059
| 2018-03-20T12:25:43
| 2018-03-20T12:25:43
| 101,509,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,689
|
py
|
# from django.test import Client, TestCase
# from django.urls import reverse
# from django.contrib.auth import get_user_model
# from django.utils import timezone
#
# from ecoke.models import Brand
#
#
# def create_user():
# User = get_user_model()
# user = User(username='john', email='john@gmail.com', is_active=True)
# user.set_password('letmein')
# user.save()
# return user
#
#
# class IndexTestCase(TestCase):
# def setUp(self):
# user = create_user()
#
# self.client = Client()
# self.client.force_login(user)
#
# def test_correct_template_used(self):
# url = reverse('ecoke:index')
# res = self.client.get(url)
#
# self.assertTemplateUsed(res, 'ecoke/index.html')
# self.assertIn('This is a e-Coke Application where you can collect data based on brands...', res.content)
#
#
# class BrandTestCase(TestCase):
# def setUp(self):
# user = create_user()
#
# self.client = Client()
# self.client.force_login(user)
#
# # def test_data_posted(self):
# # url = reverse('ecoke:brand_create')
# # data = {
# # 'collector_name': 'Chepe',
# # 'respondent_name': 'Chitalo',
# # 'respondent_city': 'Matano Mane',
# # 'favourite_drink': 'Fuze',
# # 'date_of_collection': timezone.now().date()
# # }
# # res = self.client.post(url, data=data)
# # self.assertEqual(Brand.objects.count(), 1)
# #
# #
# # def test_data_update(self):
# # data = {
# # 'collector_name': 'Chepe',
# # 'respondent_name': 'Chitalo',
# # 'respondent_city': 'Matano Mane',
# # 'favourite_drink': 'Fuze',
# # 'date_of_collection': timezone.now().date()
# # }
# # brand = Brand.objects.create(**data)
# # url = reverse('ecoke:brand_update', kwargs={'pk':brand.pk})
# # data['collector_name'] = 'Biro'
# # res = self.client.post(url, data=data)
# #
# # self.assertEqual(Brand.objects.first().collector_name, 'Biro')
#
# def test_data_delete(self):
# data = {
# 'collector_name': 'Chepe',
# 'respondent_name': 'Chitalo',
# 'respondent_city': 'Matano Mane',
# 'favourite_drink': 'Fuze',
# 'date_of_collection': timezone.now().date()
# }
# brand = Brand.objects.create(**data)
# self.assertEqual(Brand.objects.count(), 1)
#
# url = reverse('ecoke:brand_delete', kwargs={'pk':brand.pk})
# res = self.client.post(url)
#
# self.assertEqual(Brand.objects.count(), 0)
|
[
"erickmwazonga@gmail.com"
] |
erickmwazonga@gmail.com
|
1c2b29364823e4d003628dd1a69238ac0f8c2f65
|
6fd26735b9dfd1d3487c1edfebf9e1e595196168
|
/2018/task05a.py
|
efc9e8f1ff974feab1583ece5da19fdb8705b276
|
[
"BSD-3-Clause"
] |
permissive
|
Kwpolska/adventofcode
|
bc3b1224b5272aa8f3a5c4bef1d8aebe04dcc677
|
8e55ef7b31a63a39cc2f08b3f28e15c2e4720303
|
refs/heads/master
| 2021-01-10T16:48:38.816447
| 2019-12-03T20:46:07
| 2019-12-03T20:46:07
| 47,507,587
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
#!/usr/bin/env python3
import string
with open("input/05.txt") as fh:
file_data = fh.read().strip()
def solve(data):
while True:
out = data[0]
lout = data[0].lower()
for c in data[1:]:
lc = c.lower()
if lout and lout[-1] == lc and out[-1] != c:
out = out[:-1]
lout = lout[:-1]
else:
out += c
lout += lc
if out == data:
break
data = out
return len(data), data
test_data = "dabAcCaCBAcCcaDA"
test_output = solve(test_data)
test_expected = (10, "dabCBAcaDA")
print(test_output, test_expected)
assert test_output == test_expected
print(solve(file_data))
|
[
"kwpolska@gmail.com"
] |
kwpolska@gmail.com
|
0863e527af5d732af536a3f43a250d44e42f5214
|
bc441bb06b8948288f110af63feda4e798f30225
|
/ops_automation_sdk/model/topology/view_pb2.pyi
|
574a1e0fb8757902dfdc5b3c1c4fdc934341eb51
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,314
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from ops_automation_sdk.model.topology.area_pb2 import (
Area as ops_automation_sdk___model___topology___area_pb2___Area,
)
from ops_automation_sdk.model.topology.link_pb2 import (
Link as ops_automation_sdk___model___topology___link_pb2___Link,
)
from ops_automation_sdk.model.topology.node_pb2 import (
Node as ops_automation_sdk___model___topology___node_pb2___Node,
)
from ops_automation_sdk.model.topology.note_pb2 import (
Note as ops_automation_sdk___model___topology___note_pb2___Note,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class View(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Diff(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
@property
def addNodes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___node_pb2___Node]: ...
@property
def removeNodes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___node_pb2___Node]: ...
@property
def addLinks(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___link_pb2___Link]: ...
@property
def removeLinks(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___link_pb2___Link]: ...
def __init__(self,
*,
addNodes : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___node_pb2___Node]] = None,
removeNodes : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___node_pb2___Node]] = None,
addLinks : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___link_pb2___Link]] = None,
removeLinks : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___link_pb2___Link]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> View.Diff: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> View.Diff: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"addLinks",b"addLinks",u"addNodes",b"addNodes",u"removeLinks",b"removeLinks",u"removeNodes",b"removeNodes"]) -> None: ...
id = ... # type: typing___Text
name = ... # type: typing___Text
creator = ... # type: typing___Text
modifier = ... # type: typing___Text
readAuthorizers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
writeAuthorizers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
version = ... # type: typing___Text
ctime = ... # type: builtin___int
mtime = ... # type: builtin___int
@property
def rootNode(self) -> ops_automation_sdk___model___topology___node_pb2___Node: ...
@property
def nodes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___node_pb2___Node]: ...
@property
def links(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___link_pb2___Link]: ...
@property
def areas(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___area_pb2___Area]: ...
@property
def notes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___note_pb2___Note]: ...
@property
def diff(self) -> View.Diff: ...
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
creator : typing___Optional[typing___Text] = None,
modifier : typing___Optional[typing___Text] = None,
readAuthorizers : typing___Optional[typing___Iterable[typing___Text]] = None,
writeAuthorizers : typing___Optional[typing___Iterable[typing___Text]] = None,
version : typing___Optional[typing___Text] = None,
ctime : typing___Optional[builtin___int] = None,
mtime : typing___Optional[builtin___int] = None,
rootNode : typing___Optional[ops_automation_sdk___model___topology___node_pb2___Node] = None,
nodes : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___node_pb2___Node]] = None,
links : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___link_pb2___Link]] = None,
areas : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___area_pb2___Area]] = None,
notes : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___note_pb2___Note]] = None,
diff : typing___Optional[View.Diff] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> View: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> View: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"diff",b"diff",u"rootNode",b"rootNode"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"areas",b"areas",u"creator",b"creator",u"ctime",b"ctime",u"diff",b"diff",u"id",b"id",u"links",b"links",u"modifier",b"modifier",u"mtime",b"mtime",u"name",b"name",u"nodes",b"nodes",u"notes",b"notes",u"readAuthorizers",b"readAuthorizers",u"rootNode",b"rootNode",u"version",b"version",u"writeAuthorizers",b"writeAuthorizers"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
a52188f6b0918565c35293d5307352a232165999
|
daa8ed885ab8cac1d0c568bb01cb7c6426ae545a
|
/arithmetic.py
|
64238ac84292597ced56bf9c3f77eb27f706d7f0
|
[] |
no_license
|
k8k/HBExercise02
|
f20cacc9761ab9d57be54416be0bc1a63967681d
|
de01b9a5466c662d823c1fbf16ff110684538d54
|
refs/heads/master
| 2021-01-19T06:18:35.253982
| 2014-10-02T01:03:33
| 2014-10-02T01:03:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
def add(num1, num2):
    """Return the sum of the two operands."""
    return num1 + num2

def subtract(num1, num2):
    """Return num1 minus num2."""
    return num1 - num2

def multiply(num1, num2):
    """Return the product of the two operands."""
    return num1 * num2

def divide(num1, num2):
    """Return num1 / num2 as a float (true division even for ints)."""
    return float(num1) / num2

def square(num1):
    """Return num1 raised to the power 2."""
    return num1 ** 2

def cube(num1):
    """Return num1 raised to the power 3."""
    return num1 ** 3
def power(num1, num2):
    """Return num1 raised to the power num2.

    BUG FIX: the original had a ``print`` statement after the ``return``,
    which was unreachable dead code; it has been removed.
    """
    return num1 ** num2
def mod(num1, num2):
    """Return the remainder of num1 divided by num2."""
    remainder = num1 % num2
    return remainder
|
[
"info@hackbrightacademy.com"
] |
info@hackbrightacademy.com
|
a491e7f4fe918a165410005e4a2a8a193a1fdc41
|
6bc7062b2f99d0c54fd1bb74c1c312a2e3370e24
|
/crowdfunding/projects/migrations/0011_auto_20200823_1422.py
|
c685f8e23363ce08211f7cb85847cbf765624d1e
|
[] |
no_license
|
marinkoellen/drf-proj
|
f2d1f539efb877df69d285bd2fe6d5e789709933
|
874549d68ab80a774988c83706bb7934e035de42
|
refs/heads/master
| 2022-12-25T16:53:52.187704
| 2020-10-03T03:54:06
| 2020-10-03T03:54:06
| 289,620,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
# Generated by Django 3.0.8 on 2020-08-23 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: switches ``Project.date_created`` to
    # ``auto_now_add`` so the timestamp is set once at insert time.
    dependencies = [
        ('projects', '0010_auto_20200823_1421'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='date_created',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
|
[
"ellen.marinko1@gmail.com"
] |
ellen.marinko1@gmail.com
|
6f2e0b21a9d3099d28f4f0e9656df7eb350b08b5
|
4086ded777dab91e3b88e376c9e86487ea4f670f
|
/src/Modules/Trinity.FFI/Trinity.FFI.Python/GraphEngine/Storage/cache_manager.py
|
1367785a64745e5cd5f3eed7f0436c7d8e31563d
|
[
"MIT"
] |
permissive
|
qdoop/GraphEngine
|
a618c5c7459036e24342288e5ae13a023e4c7228
|
d83381c781edc4040824c1e31057789939530eff
|
refs/heads/master
| 2020-03-07T03:28:56.175677
| 2018-03-30T00:35:52
| 2018-03-30T00:35:52
| 127,236,851
| 0
| 0
|
MIT
| 2018-03-29T04:30:59
| 2018-03-29T04:30:59
| null |
UTF-8
|
Python
| false
| false
| 2,139
|
py
|
from GraphEngine import GraphMachine
def filter_null_args(arg_list):
    """Lazily yield only the non-None entries of *arg_list*.

    Used to forward optional positional arguments to native calls while
    dropping the ones the caller did not supply.
    """
    return (arg for arg in arg_list if arg is not None)
class CacheManager:
    """Thin Python facade over a native Trinity storage manager (``inst``).

    Subclasses assign ``inst`` in ``__init__``; every instance method below
    simply forwards to the corresponding native call on ``inst``.
    """
    # Class-level defaults, overridden by subclasses / instances.
    is_accessor = False
    inst = None
    module_id = -1
    def load_cell(self, cell_id):
        return self.inst.LoadCell(cell_id)
    def save_cell(self, index=None, cell_id=None, write_ahead_log_options=None):
        # Only the arguments actually supplied are forwarded (None is dropped).
        return self.inst.SaveCell(*filter_null_args((write_ahead_log_options, cell_id, index)))
    def get_id(self, index):
        return self.inst.CellGetId(index)
    def get_field(self, index, field_name):
        return self.inst.CellGetField(index, field_name)
    def set_field(self, index, field_name, value):
        return self.inst.CellSetField(index, field_name, value)
    def append_field(self, index, field_name, content):
        return self.inst.CellAppendField(index, field_name, content)
    def remove_field(self, index, field_name):
        return self.inst.CellRemoveField(index, field_name)
    def delete(self, index):
        return self.inst.Del(index)
    def dispose(self):
        return self.inst.Dispose()
    @staticmethod
    def remove_cell(cell_id):
        # Remove the cell from storage, then return its id to the allocator.
        GraphMachine.storage.RemoveCellFromStorage(cell_id)
        GraphMachine.id_allocator.dealloc(cell_id)
class CellAccessorManager(CacheManager):
    """Manager backed by the native ``CellAccessorManager``; usable as a
    context manager that disposes the native handle on exit."""
    def __init__(self):
        self.inst = GraphMachine.storage.CellAccessorManager()
        self.module_id = self.inst.ModuleId
        # BUG FIX: this accessor-backed manager previously set
        # is_accessor = False -- the flag was swapped with CellManager's.
        self.is_accessor = True
    def use_cell(self, cell_id, options=None, cell_type=None):
        """Open the cell *cell_id*; optional access options / cell type."""
        return self.inst.UseCell(cell_id, *filter_null_args((options, cell_type)))
    def __enter__(self):
        # TODO
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # TODO
        self.inst.Dispose()
        del self.inst
class CellManager(CacheManager):
    """Manager backed by the native ``CellManager`` (cell creation)."""
    def __init__(self):
        self.inst = GraphMachine.storage.CellManager()
        self.module_id = self.inst.ModuleId
        # BUG FIX: this non-accessor manager previously set
        # is_accessor = True -- the flag was swapped with CellAccessorManager's.
        self.is_accessor = False
    def new_cell(self, cell_type, cell_id=None, content=None):
        """Create a new cell of *cell_type*, optionally with an id/content."""
        return self.inst.NewCell(*filter_null_args((cell_type, cell_id, content)))
|
[
"twshere@outlook.com"
] |
twshere@outlook.com
|
53ab5d874fe9b4a2789f77ec49d2cfe1ddc0e0fd
|
f0d0ea29240c53b6ce1c4b06095b528ece02fdd7
|
/utils/stdvalue.py
|
93e1cb555fd015df9031244044dc821b516d2157
|
[] |
no_license
|
zhifuliu/dianjing
|
477529ccd6159329e1bc121aeb2ff328ee499f4a
|
7b3f6d58f5bc0738651d8d72c9a24df4ade0ed36
|
refs/heads/master
| 2020-03-21T09:10:28.343268
| 2017-03-24T03:06:24
| 2017-03-24T03:06:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
# -*- coding: utf-8 -*-
"""
Author: Wang Chao <yueyoum@gmail.com>
Filename: stdvalue
Date Created: 2016-09-30 18:30
Description:
"""
# Largest value representable by a signed 32-bit integer (2147483647).
MAX_INT = 2 ** 31 - 1
|
[
"yueyoum@gmail.com"
] |
yueyoum@gmail.com
|
24a2a2e09bb18264bdfb641a5ed9b3275d98e05b
|
a9f56cfe2fafc7981ae4d37afd49d3049d2f98b9
|
/t_sms/models.py
|
b4639b553775a4ebef0802490b6ce52b673e181b
|
[] |
no_license
|
talhajubair100/django_allauth_language_currence_test
|
78dc340e4f3392c0dd18cd2e666c02ae04eba7bc
|
9530d548ea092145bc583b8a839a9bac891ef098
|
refs/heads/main
| 2023-02-21T20:25:42.332322
| 2021-01-24T14:10:13
| 2021-01-24T14:10:13
| 326,125,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
from django.db import models
import os
from twilio.rest import Client
# Create your models here.
class Detail(models.Model):
    """Contact record; saving a row also sends an SMS via Twilio."""
    name = models.CharField(max_length=150)
    phone = models.CharField(max_length=14)
    details = models.CharField(max_length=200)
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        # SECURITY NOTE(review): credentials are hard-coded placeholders here;
        # load the real SID/token from settings or environment variables, never
        # from source control.
        account_sid = '****************'
        auth_token = '******************'
        client = Client(account_sid, auth_token)
        # NOTE(review): an SMS is sent on *every* save (creates and updates)
        # and always to the same fixed number -- confirm that is intended.
        message = client.messages.create(
            body=f'Hi {self.name}! {self.details}',
            from_='******',
            to='+8801735700187'
        )
        print('your sms id',message.sid)
        return super().save(*args, **kwargs)
|
[
"talhajubair100.bd@gmail.com"
] |
talhajubair100.bd@gmail.com
|
2569dc664dc758ba28cbbdbad398093631bd023b
|
ccfd5f8efc0cf6a9ec1867860e5a16d19beaee90
|
/neobistime/events/migrations/0009_event_image.py
|
a338af06b377c6da5159e7c0fa0d4c60f2a9b68a
|
[] |
no_license
|
magina671/neobistime
|
bf9bc759acf39367e3be42164c05575c1b8ed7e8
|
6dc3f8c8195997cfd4cabd5666e89d6f80a7b95c
|
refs/heads/master
| 2022-12-26T21:37:16.171324
| 2020-10-01T18:37:31
| 2020-10-01T18:37:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# Generated by Django 2.2.6 on 2020-08-05 14:28
from django.db import migrations
import easy_thumbnails.fields
class Migration(migrations.Migration):
    # Auto-generated migration: adds an ``image`` thumbnail field to Event
    # with a default placeholder image and a date-based upload path.
    dependencies = [
        ('events', '0008_auto_20200805_0623'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='image',
            field=easy_thumbnails.fields.ThumbnailerImageField(default='event_imgs/default.jpg', upload_to='event_imgs/%Y/%m/%d/'),
        ),
    ]
|
[
"daniyarflash.m01@gmail.com"
] |
daniyarflash.m01@gmail.com
|
f11f47b153e424f308df2de51c0fff6667d742b7
|
34599596e145555fde0d4264a1d222f951f49051
|
/pcat2py/class/214bd114-5cc5-11e4-af55-00155d01fe08.py
|
b6a94cc57e3436b403a340fcb5162b16fe7d3c98
|
[
"MIT"
] |
permissive
|
phnomcobra/PCAT2PY
|
dc2fcbee142ce442e53da08476bfe4e68619346d
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
refs/heads/master
| 2021-01-11T02:23:30.669168
| 2018-02-13T17:04:03
| 2018-02-13T17:04:03
| 70,970,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
#!/usr/bin/python
################################################################################
# 214bd114-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """Registry compliance check for Internet Settings zone 4, value 1604."""

    def __init__(self):
        self.output = []           # evidence lines from the last check()
        self.is_compliant = False  # result of the last check()
        self.uuid = "214bd114-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Read the registry DWORD via *cli*; compliant iff it equals 3."""
        key_path = r'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings\Zones\4'
        dword = cli.get_reg_dword(key_path, '1604')
        self.output = [key_path, ('1604=' + str(dword))]
        self.is_compliant = (dword == 3)
        return self.is_compliant

    def fix(self, cli):
        """Create the registry path and force value 1604 to 3."""
        cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings\Zones'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings\Zones\4'")
        cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings\Zones\4' -name '1604' -value 3 -Type DWord")
|
[
"phnomcobra@gmail.com"
] |
phnomcobra@gmail.com
|
b8cec1ad804460eb9ff1f586909b5ef9f9b0f4bc
|
813eb2e364262450d43263a524074098a8934bf1
|
/gtdtst.py
|
9d9fa734bab2f939323fd8d55ac1555b71016cfc
|
[] |
no_license
|
cniekel/gtdtool
|
67f0e9ab4622f5361959ed51e4369523f6cbccc3
|
38a23806efd697814c9ebc492d87f88403ace041
|
refs/heads/master
| 2016-09-06T16:15:31.820358
| 2008-12-11T21:43:46
| 2008-12-11T21:43:46
| 88,992
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,419
|
py
|
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Frame, PageTemplate, BaseDocTemplate
from reportlab.platypus import KeepTogether
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch,cm
from reportlab.lib.pagesizes import A4
PAGE_HEIGHT=defaultPageSize[1]; PAGE_WIDTH=defaultPageSize[0]
styles = getSampleStyleSheet()
class GTDPageTemplate(PageTemplate):
    """Page template that splits the page into four equal quadrant frames.

    Flowables fill the frames in order f1 -> f2 -> f3 -> f4 (bottom-right,
    top-right, top-left, bottom-left; reportlab's origin is bottom-left).
    """
    def __init__(self, id, pageSize=defaultPageSize):
        self.pageWidth = pageSize[0]
        self.pageHeight = pageSize[1]
        border = 0.3*cm
        # Quadrant dimensions; note the *active* Frame() calls below place
        # the quadrants flush against each other and ignore ``border``.
        miniwidth = (self.pageWidth / 2) - 2*border
        miniheight = (self.pageHeight / 2) - 2*border
        #self.f4 = Frame(border, border, miniwidth, miniheight, id='p4')
        #self.f1 = Frame(self.pageWidth/2 + border, border, miniwidth, miniheight, id='p1')
        #self.f2 = Frame(self.pageWidth/2 + border, self.pageHeight/2 + border, miniwidth, miniheight, id='p2')
        #self.f3 = Frame(border, self.pageHeight/2 + border, miniwidth, miniheight, id='p3')
        self.f4 = Frame(0, 0, miniwidth, miniheight, id='p4')
        self.f1 = Frame(miniwidth, 0, miniwidth, miniheight, id='p1')
        self.f2 = Frame(miniwidth, miniheight, miniwidth, miniheight, id='p2')
        self.f3 = Frame(0, miniheight, miniwidth, miniheight, id='p3')
        PageTemplate.__init__(self, id, [self.f1, self.f2, self.f3, self.f4])
class GTDDocTemplate(BaseDocTemplate):
    """Thin BaseDocTemplate subclass; exists as a hook for customisation."""
    def __init__(self, file, **kw):
        BaseDocTemplate.__init__(self,file, **kw)
def myPages(canvas, doc):
    """Page-decoration callback: stamp a footer with the current page number.

    BUG FIX: the original ``drawString`` line was missing its closing
    parenthesis, which made the whole module a SyntaxError.
    """
    canvas.saveState()
    canvas.setFont('Times-Roman', 9)
    canvas.drawString(inch, 0.75*inch, 'Page %d %s' % (doc.page, 'test'))
    # Balance saveState() so the font change does not leak into later drawing.
    canvas.restoreState()
def go():
    """Build phello.pdf: eight keep-together paragraphs flowed through the
    four quadrant frames of the GTD page template."""
    doc = GTDDocTemplate('phello.pdf', pageSize=A4)
    doc.addPageTemplates(GTDPageTemplate('gtd', doc.pagesize))
    Story=[]
    style = styles['Normal']
    style.leftIndent = 0.75*inch
    style.firstLineIndent = 0
    style.spaceAfter = 3
    # NOTE(review): refresh()/listAttrs() look like debugging leftovers;
    # listAttrs() dumps the style's attributes to stdout -- confirm intent.
    style.refresh()
    style.listAttrs()
    for i in range(8):
        # Long dummy paragraph so frames overflow and chain to the next one.
        bogustext = ('This is <br>paragraph <font color="red"><i>number</i></font> %s' % i) * 20
        p = KeepTogether([Paragraph('Yo %d'%i, style), Paragraph(bogustext, style)])
        Story.append(p)
        Story.append(Spacer(1, 0.2*inch))
    doc.build(Story)#, onFirstPage=myPages, onLaterPages=myPages)
if __name__ == '__main__':
go()
|
[
"="
] |
=
|
d96e3355e5e62406b5e4338cbe35b7eaa929bbac
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/48/usersdata/102/16157/submittedfiles/estatistica.py
|
09d01593ac5da824c14d1683c4b56411be6a6d17
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def media(lista):
    """Return the arithmetic mean of the numbers in *lista*."""
    total = 0
    for valor in lista:
        total += valor
    return total / len(lista)
#Baseado na função acima, escreva a função para calcular o desvio padrão de uma lista
# Read two data sets of n values each from the user.
# NOTE(review): this relies on Python 2 ``input()`` semantics (the typed text
# is evaluated into a number); under Python 3 these calls return strings and
# would need int()/float() wrappers.
a=[]
b=[]
n=input('digite a quantidade de elementos :')
for i in range(0,n,1):
    a.append(input('digite um elemento '))
soma=0  # NOTE(review): unused here -- desviopadrao() has its own accumulator.
for i in range (0,n,1):
    b.append(input('digite um elemnto '))
media_a= media(a)
media_b= media(b)
def desviopadrao(lista):
    """Return the sample standard deviation of *lista* (denominator n - 1)."""
    m = media(lista)
    soma_quadrados = 0
    for valor in lista:
        soma_quadrados += (valor - m) ** 2
    return (soma_quadrados / (len(lista) - 1)) ** 0.5
# BUG FIX: the original called s(a)/s(b), but no function named ``s`` exists
# (NameError at runtime); the standard-deviation helper is ``desviopadrao``.
s_a = desviopadrao(a)
s_b = desviopadrao(b)
print('%.2f :' % media_a)
print('%.2f:' % s_a)
print('%.2f:' % media_b)
print('%.2f:' % s_b)
#Por último escreva o programa principal, que pede a entrada e chama as funções criadas.
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
dfd560b021f274cf620269da02b8991453c27075
|
cf6a50732d708a3a3db0f297b73cb6f449a00b44
|
/Practice15_Dict_Comprehensions/Prac_15_3_4lists.py
|
08394fddeab789aa6f4927a312461f0c8229a29e
|
[] |
no_license
|
subash319/PythonDepth
|
9fe3920f4b0a25be02a9abbeeb60976853ab812e
|
0de840b7776009e8e4362d059af14afaac6a8879
|
refs/heads/master
| 2022-11-16T03:03:56.874422
| 2020-07-17T01:19:39
| 2020-07-17T01:19:39
| 266,921,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
#
# names = ['Ted', 'Sam', 'Jim', 'Rob', 'Anu']
# maths = [98,67,54,88,95]
# physics = [88,64,78,99,78]
# chemistry = [78,67,45,79,87]
# These 4 lists contain the names and marks of students in 3 subjects.
# Write a dictionary comprehension to create the following dictionary from the above 4 lists.
# { 'Ted': [98, 88, 78],
# 'Sam': [67, 64, 67],
# 'Jim': [54, 78, 45],
# 'Rob': [88, 99, 79],
# 'Anu': [95, 78, 87] }
# Student names and their marks in three subjects (index-aligned lists).
names = ['Ted', 'Sam', 'Jim', 'Rob', 'Anu']
maths = [98, 67, 54, 88, 95]
physics = [88, 64, 78, 99, 78]
chemistry = [78, 67, 45, 79, 87]

# Walk the four lists in lockstep, mapping each student to [math, phys, chem].
dict_list = {student: [m, p, c]
             for student, m, p, c in zip(names, maths, physics, chemistry)}
print(dict_list)
|
[
"subas319@gmail.com"
] |
subas319@gmail.com
|
e8671cf5ba3ec2529608a9c484cbb899180c4e45
|
439c87c48c6c2c812d1faca73cbf1b632e9403dc
|
/DAYS/Day23/Frequency_can_become_same.py
|
e3d4dbf245a9234ce1844db16bb8f4940835985b
|
[
"MIT"
] |
permissive
|
saubhagyav/100_Days_Code_Challenge
|
14ca8db68e09c7ac7741f164fea8b62cb36bf2c0
|
bde41126b9342eacc488c79d01dc4b76a3651c93
|
refs/heads/main
| 2023-08-05T03:12:18.918079
| 2021-09-12T12:20:41
| 2021-09-12T12:20:41
| 389,375,066
| 2
| 2
| null | 2021-07-25T15:06:18
| 2021-07-25T15:06:17
| null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
from collections import Counter
def Check_Frequency(Test_string):
    """Return True iff removing at most one character from *Test_string* can
    make every remaining character occur the same number of times.

    Valid situations:
      * all characters already share one frequency;
      * exactly one character occurs exactly once (drop it entirely);
      * exactly one character occurs one time more than all the others
        (drop a single occurrence of it).

    BUG FIX: the original compared a *set* of frequencies, which discards how
    many characters share each frequency -- e.g. "aabbbccc" (two characters at
    the higher count) was wrongly accepted, and "aaab" (remove the lone 'b')
    was wrongly rejected.
    """
    if not Test_string:
        return True
    char_counts = Counter(Test_string)
    # Map frequency -> number of characters having that frequency.
    freq_counts = sorted(Counter(char_counts.values()).items())
    if len(freq_counts) == 1:
        return True
    if len(freq_counts) == 2:
        (low, n_low), (high, n_high) = freq_counts
        # One character occurs once: remove it entirely.
        if low == 1 and n_low == 1:
            return True
        # One character is exactly one over the rest: remove one occurrence.
        if high == low + 1 and n_high == 1:
            return True
    return False
# Quick manual check with the sample string from the exercise.
Test_string = "xxxyyzz"
print("Yes" if Check_Frequency(Test_string) else "No")
|
[
"noreply@github.com"
] |
saubhagyav.noreply@github.com
|
960ac25a67fbf72976ab75e6b8dbf05281553012
|
758bf41e46a3093f4923af603f1f7f8063408b9c
|
/website/testFromRemoteRepo/_bsch3398/museum/python/django/contrib/gis/gdal/__init__.py
|
7c3a03b42b7bc94fdbb03cfa9c6a54e206c549f2
|
[
"BSD-3-Clause"
] |
permissive
|
mpetyx/mpetyx.com
|
4033d97b21c9227a6ba505980fd0c1b57254e8fb
|
d50c379b4fe09e0135656573f7049225fc90ae36
|
refs/heads/master
| 2021-01-10T19:50:15.488371
| 2014-01-22T09:04:14
| 2014-01-22T09:04:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,022
|
py
|
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existant file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
    from django.contrib.gis.gdal.driver import Driver
    from django.contrib.gis.gdal.datasource import DataSource
    from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, gdal_release_date, GEOJSON, GDAL_VERSION
    from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
    from django.contrib.gis.gdal.geometries import OGRGeometry
    HAS_GDAL = True
except Exception:
    # BUG FIX: this was a bare ``except:``, which also swallows SystemExit and
    # KeyboardInterrupt; Exception is still broad enough to cover ImportError
    # and any GDAL library-load failure.
    HAS_GDAL, GEOJSON = False, False

try:
    from django.contrib.gis.gdal.envelope import Envelope
except ImportError:
    # No ctypes, but don't raise an exception.
    pass
|
[
"mpetyx@gmail.com"
] |
mpetyx@gmail.com
|
703d10b745c8000288c553adcd42e3990d554b8b
|
1b1a30bfa44ad30fa6fb4ac2e6254d9ff2bf9d46
|
/keytree/tests/test_write.py
|
de9d42f3b86e230c671961a2444f2286f439f14f
|
[] |
no_license
|
imclab/keytree
|
bff25eee2c8e82aa95062ddbfdda32b0b3a0759b
|
db7dc932af92d4367bdb934632327c6d9963c2cb
|
refs/heads/master
| 2020-12-13T19:14:31.044506
| 2012-12-28T23:20:06
| 2012-12-28T23:20:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
from unittest import TestCase
from xml.etree import ElementTree as etree
from keytree import element
KML = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
</Document>
</kml>
"""
class ElementWriterTestCase(TestCase):
    """Exercise keytree.element(): GeoJSON-style feature dict -> KML Placemark.

    BUG FIX: ``failUnlessEqual`` (deprecated since Python 2.7/3.2 and removed
    in Python 3.12) is replaced with ``assertEqual`` throughout.
    """

    def setUp(self):
        self.doc = etree.fromstring(KML)

    def test_element(self):
        """Name/snippet/coordinates come from the feature's properties."""
        f = {
            'id': '1',
            'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)},
            'properties': {
                'title': 'one',
                'description': 'Point one' } }
        elem = element(self.doc, f)
        self.assertEqual(
            elem.tag, '{http://www.opengis.net/kml/2.2}Placemark' )
        self.assertEqual(elem.attrib['id'], '1')
        self.assertEqual(
            elem.find('{http://www.opengis.net/kml/2.2}name').text,
            'one' )
        self.assertEqual(
            elem.find('{http://www.opengis.net/kml/2.2}Snippet').text,
            'Point one' )
        self.assertEqual(
            elem.find('{http://www.opengis.net/kml/2.2}Point').find(
                '{http://www.opengis.net/kml/2.2}coordinates').text,
            '0.000000,0.000000,0.0' )

    def test_element_kw(self):
        """Keyword arguments supply name and snippet directly."""
        f = {
            'id': '1',
            'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)},
            'properties': {} }
        elem = element(self.doc, f, name='one', snippet='Point one')
        self.assertEqual(
            elem.tag, '{http://www.opengis.net/kml/2.2}Placemark' )
        self.assertEqual(elem.attrib['id'], '1')
        self.assertEqual(
            elem.find('{http://www.opengis.net/kml/2.2}name').text,
            'one' )
        self.assertEqual(
            elem.find('{http://www.opengis.net/kml/2.2}Snippet').text,
            'Point one' )
|
[
"sean.gillies@gmail.com"
] |
sean.gillies@gmail.com
|
514a7cb7000a793365eac04a7428ab887b3fab7f
|
adb759899204e61042225fabb64f6c1a55dac8ce
|
/14500~14599/14501.py
|
ffaf69b3c4a49b459ac1f40535dc459b3e8d71b2
|
[] |
no_license
|
geneeol/baekjoon-online-judge
|
21cdffc7067481b29b18c09c9152135efc82c40d
|
2b359aa3f1c90f178d0c86ce71a0580b18adad6f
|
refs/heads/master
| 2023-03-28T23:25:12.219487
| 2021-04-01T09:19:06
| 2021-04-01T09:19:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,439
|
py
|
# 문제
# 상담원으로 일하고 있는 백준이는 퇴사를 하려고 한다.
# 오늘부터 N+1일째 되는 날 퇴사를 하기 위해서, 남은 N일 동안 최대한 많은 상담을 하려고 한다.
# 백준이는 비서에게 최대한 많은 상담을 잡으라고 부탁을 했고, 비서는 하루에 하나씩 서로 다른 사람의 상담을 잡아놓았다.
# 각각의 상담은 상담을 완료하는데 걸리는 기간 Ti와 상담을 했을 때 받을 수 있는 금액 Pi로 이루어져 있다.
# N = 7인 경우에 다음과 같은 상담 일정표를 보자.
# 1일 2일 3일 4일 5일 6일 7일
# Ti 3 5 1 1 2 4 2
# Pi 10 20 10 20 15 40 200
# 1일에 잡혀있는 상담은 총 3일이 걸리며, 상담했을 때 받을 수 있는 금액은 10이다.
# 5일에 잡혀있는 상담은 총 2일이 걸리며, 받을 수 있는 금액은 15이다.
# 상담을 하는데 필요한 기간은 1일보다 클 수 있기 때문에, 모든 상담을 할 수는 없다.
# 예를 들어서 1일에 상담을 하게 되면, 2일, 3일에 있는 상담은 할 수 없게 된다.
# 2일에 있는 상담을 하게 되면, 3, 4, 5, 6일에 잡혀있는 상담은 할 수 없다.
# 또한, N+1일째에는 회사에 없기 때문에, 6, 7일에 있는 상담을 할 수 없다.
# 퇴사 전에 할 수 있는 상담의 최대 이익은 1일, 4일, 5일에 있는 상담을 하는 것이며, 이때의 이익은 10+20+15=45이다.
# 상담을 적절히 했을 때, 백준이가 얻을 수 있는 최대 수익을 구하는 프로그램을 작성하시오.
# 입력
# 첫째 줄에 N (1 ≤ N ≤ 15)이 주어진다.
# 둘째 줄부터 N개의 줄에 Ti와 Pi가 공백으로 구분되어서 주어지며, 1일부터 N일까지 순서대로 주어진다. (1 ≤ Ti ≤ 5, 1 ≤ Pi ≤ 1,000)
# 출력
# 첫째 줄에 백준이가 얻을 수 있는 최대 이익을 출력한다.
# Number of remaining working days before quitting (1 <= n <= 15).
n = int(input())
# max_pay[i]: best total profit obtainable from day i (0-based) onward.
max_pay = [0] * n
# table[i] = [T_i, P_i]: duration and payment of the consultation on day i.
table = [list(map(int, input().split())) for _ in range(n)]
# Fill max_pay backwards: on each day either skip the consultation, or take
# it when it finishes on or before the last working day.
for i in range(n - 1, -1, -1):
    current_day, current_pay = table[i][0], table[i][1]
    if current_day > n - i:
        # Consultation would run past the last day -- cannot take it.
        if i != n - 1:
            max_pay[i] = max_pay[i + 1]
        continue
    if i == n - 1:
        max_pay[i] = current_pay
    elif i + current_day == n:
        # Finishes exactly at quitting day: take it, or fall back to day i+1.
        max_pay[i] = max(current_pay, max_pay[i + 1])
    else:
        # Take it (plus the best after it ends) or skip to day i+1.
        max_pay[i] = max(current_pay + max_pay[i + current_day], max_pay[i + 1])
print(max_pay[0])
|
[
"alstn2468_@naver.com"
] |
alstn2468_@naver.com
|
eae1209cde503300621b2e4cd63576ae312a868e
|
699a43917ce75b2026a450f67d85731a0f719e01
|
/using_python/322_coin_change/coin_change.py
|
453bc631d8c72e4e6d1d021cb3b8598456640723
|
[] |
no_license
|
wusanshou2017/Leetcode
|
96ab81ae38d6e04739c071acfc0a5f46a1c9620b
|
c4b85ca0e23700b84e4a8a3a426ab634dba0fa88
|
refs/heads/master
| 2021-11-16T01:18:27.886085
| 2021-10-14T09:54:47
| 2021-10-14T09:54:47
| 107,402,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
class Solution:
    def coinChange(self, coins: [int], amount: int) -> int:
        """Return the minimum number of coins summing to *amount*, or -1.

        Classic unbounded-knapsack DP: dp[total] holds the fewest coins
        needed to reach exactly ``total``.
        """
        INF = float("inf")
        dp = [0] + [INF] * amount
        for value in coins:
            for total in range(value, amount + 1):
                candidate = dp[total - value] + 1
                if candidate < dp[total]:
                    dp[total] = candidate
        return -1 if dp[amount] == INF else dp[amount]
# BUG FIX: the original called so.coinChange() with no arguments, which raises
# TypeError (``coins`` and ``amount`` are required). Exercise a sample case.
so = Solution()
print(so.coinChange([1, 2, 5], 11))
|
[
"252652905@qq.com"
] |
252652905@qq.com
|
6828a17057599731cc595b26995b3930f8a78e82
|
7a15271c7cddd199f43555469a67d26ce0f60836
|
/uncertainty_baselines/models/vit_mimo.py
|
d218ad8262546522d746a03e843d3e3409b5167a
|
[
"Apache-2.0"
] |
permissive
|
google/uncertainty-baselines
|
b2c339d918bf3949ee066f9eafa6b51232a2ac3d
|
f5f6f50f82bd441339c9d9efbef3f09e72c5fef6
|
refs/heads/main
| 2023-09-02T13:59:26.355288
| 2023-08-14T16:35:22
| 2023-08-14T16:36:11
| 280,026,201
| 1,235
| 198
|
Apache-2.0
| 2023-09-11T22:21:48
| 2020-07-16T01:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,957
|
py
|
# coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MIMO Vision Transformer model."""
from typing import Any, Optional, Tuple
import flax.linen as nn
import jax
import jax.numpy as jnp
from uncertainty_baselines.models import vit
Array = Any
PRNGKey = Any
Shape = Tuple[int]
Dtype = Any
class VisionTransformerMIMO(nn.Module):
  """MIMO Vision Transformer model.

  Multi-input multi-output ensembling on a standard ViT: the input batch
  stacks E ensemble members along the channel axis and the head emits E
  stacked predictions (see the shape contract on ``__call__``).
  """
  num_classes: int
  # Patch-embedding config; ``patches.size`` is the conv kernel and stride.
  patches: Any
  # Keyword arguments forwarded to ``vit.Encoder``.
  transformer: Any
  hidden_size: int
  # Number of MIMO ensemble members (E).
  ensemble_size: int
  representation_size: Optional[int] = None
  # 'token' -> prepend and read a CLS token; 'gap' -> global average pool.
  classifier: str = 'token'
  # If True, gradients are stopped just before the head (base model frozen).
  fix_base_model: bool = False
  @nn.compact
  def __call__(self, inputs, *, train):
    """Function of shapes [B*R,h,w,c*E] -> [E*B*R,num_classes]."""
    out = {}
    x = inputs
    # We can merge s2d+emb into a single conv; it's the same.
    x = nn.Conv(
        features=self.hidden_size,
        kernel_size=self.patches.size,
        strides=self.patches.size,
        padding='VALID',
        name='embedding')(
            x)
    # Here, x is a grid of embeddings.
    # TODO(dusenberrymw): Switch to self.sow(.).
    out['stem'] = x
    # Transformer: flatten the h x w grid into a token sequence.
    n, h, w, c = x.shape
    x = jnp.reshape(x, [n, h * w, c])
    # If we want to add a class token, add it here.
    if self.classifier == 'token':
      cls = self.param('cls', nn.initializers.zeros, (1, 1, c))
      cls = jnp.tile(cls, [n, 1, 1])
      x = jnp.concatenate([cls, x], axis=1)
    x = vit.Encoder(name='Transformer', **self.transformer)(x, train=train)
    out['transformed'] = x
    # Pool the token sequence down to one vector per example.
    if self.classifier == 'token':
      x = x[:, 0]
    elif self.classifier == 'gap':
      x = jnp.mean(x, axis=list(range(1, x.ndim - 1)))  # (1,) or (1,2)
    else:
      raise ValueError(f'Invalid classifier={self.classifier}')
    out['head_input'] = x
    if self.representation_size is not None:
      x = nn.Dense(features=self.representation_size, name='pre_logits')(x)
      out['pre_logits'] = x
      x = nn.tanh(x)
    else:
      x = vit.IdentityLayer(name='pre_logits')(x)
      out['pre_logits'] = x
    # TODO(markcollier): Fix base model without using stop_gradient.
    if self.fix_base_model:
      x = jax.lax.stop_gradient(x)
    # Shape: (batch_size, num_classes * ensemble_size).
    x = nn.Dense(self.num_classes * self.ensemble_size,
                 name='head',
                 kernel_init=nn.initializers.zeros)(x)
    # Split the E predictions off the channel axis and stack them on batch.
    # Shape: (batch_size * ensemble_size, num_classes).
    x = jnp.concatenate(jnp.split(x, self.ensemble_size, axis=-1))
    out['logits'] = x
    return x, out
def vision_transformer_mimo(num_classes: int,
                            patches: Any,
                            transformer: Any,
                            hidden_size: int,
                            ensemble_size: int,
                            representation_size: Optional[int] = None,
                            classifier: str = 'token',
                            fix_base_model: bool = False):
  """Builds a MIMO Vision Transformer (ViT) model.

  Thin factory: every argument is forwarded unchanged to
  `VisionTransformerMIMO`.
  """
  # TODO(dusenberrymw): Add API docs once the config dict in VisionTransformerBE
  # is cleaned up.
  model_kwargs = dict(
      num_classes=num_classes,
      patches=patches,
      transformer=transformer,
      hidden_size=hidden_size,
      ensemble_size=ensemble_size,
      representation_size=representation_size,
      classifier=classifier,
      fix_base_model=fix_base_model,
  )
  return VisionTransformerMIMO(**model_kwargs)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
b7f7076f5afb4e3428632e25750722c00706b99e
|
4777728e147ef6a7d3af53ac89ad7e7527fa7b54
|
/scripts/match_districts.py
|
64a10b50e30424cad54373cf920d8a713cd27253
|
[
"MIT"
] |
permissive
|
meilinger/firecares
|
d8da365e355aec7c0f9f75da25fca655432d0254
|
fbcde3c1fb89a07b2b28d9039b49dca53b9b991b
|
refs/heads/master
| 2021-01-18T19:38:45.330532
| 2016-11-01T20:51:25
| 2016-11-01T20:51:38
| 41,003,852
| 0
| 0
| null | 2015-08-19T00:12:20
| 2015-08-19T00:12:20
| null |
UTF-8
|
Python
| false
| false
| 1,319
|
py
|
import glob
import os
import sys
sys.path.append(os.pardir)
from firecares.firestation.management.commands.match_districts import Command
from firecares.firestation.models import FireDepartment, FireStation
from django.contrib.gis.geos import GeometryCollection as GC
import django
# Bootstrap Django so the ORM models imported above can be used standalone.
django.setup()

# District files are expected to be named "us-<state>-<name>*districts*.geojson"
# under the directory given as the first CLI argument.
files = glob.glob(sys.argv[1]+'*districts*.geojson')
# Parse (STATE, department-name fragment, path) out of each filename.
parsed_files = [(n.split('-')[1].upper(), n.split('-')[2], n) for n in files]

for state, name, path in parsed_files:
    department = None
    try:
        # Filename fragments use '_' where the department name has spaces.
        department = FireDepartment.priority_departments.get(state=state, name__icontains=name.replace('_', ' '))
    except FireDepartment.DoesNotExist:
        # Special-case fallback: LA City is looked up by primary key.
        if name == 'los_angeles_city':
            department = FireDepartment.objects.get(id=87256)
    # NOTE(review): any other unmatched name leaves `department` as None and the
    # call below raises AttributeError — confirm that fail-fast is intended.
    c = Command()
    # Assign districts from the GeoJSON file to this department's stations.
    c.handle(geojson_file=path, queryset=department.firestation_set.all())
    # Collect all non-null station district geometries...
    geometry_collection = GC([n for n in department.firestation_set.all().values_list('district', flat=True) if n])
    # ...and append each non-null station point geometry.
    # NOTE(review): under Python 3 `map` is lazy so nothing would be appended;
    # this script appears to target Python 2 — confirm before porting.
    map(geometry_collection.append, [n for n in department.firestation_set.all().values_list('geom', flat=True) if n])
    # NOTE(review): "disticts" is misspelled and the third format argument is
    # unused; kept as-is in case the filename is consumed downstream.
    with open(os.path.join(sys.argv[1], 'processed', 'us-{0}-{1}-disticts_processed.geojson'.format(state.lower(), name, department.name.replace(' ', '_').lower())), 'w') as output:
        output.write(geometry_collection.json)
|
[
"garnertb@gmail.com"
] |
garnertb@gmail.com
|
fdc77083dbf90bdb66b304f6e10d6aff7b96f4ab
|
d703487f4c27b92310ad1e6674da8b48e2095bc8
|
/common/logic/fish_array_3.py
|
b86373ab6e1bf7feef2f27aa2a5f5e9eace2c0b3
|
[] |
no_license
|
rolllyroman/fish_
|
0aaee7544650886c73eaf06d5bb420b409628d29
|
7b535f8a0bc875b96922121a29636aab4a20f803
|
refs/heads/master
| 2020-03-28T00:51:06.565231
| 2018-09-21T08:50:36
| 2018-09-21T08:50:36
| 147,454,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,711
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Author: $Author$
Date: $Date$
Revision: $Revision$
Description:
鱼阵3,等间距环形鱼阵
"""
from common.data.scene import WIDTH, HEIGHT, CENTER_X, CENTER_Y
from common.data.fish_init_areas import FishInitArea
from common.arith.point_math import Point
from common.data.fish_levels import FISH_LEVELS_DATA
from common.data.pickfish_consts import TOLERATE_LAG_SECS
import fish_array
from common.gameobject import GameObject
import random
import math
from common.pb_utils import pbAppendRoute
class FishArrayInit(GameObject):
    """Spawn parameters for one ring of the circular fish formation.

    Pure value holder: every constructor argument is stored unchanged.
    """

    def __init__(self, initArea, fishLevels, space, speed, counts):
        # initArea: spawn/exit area; fishLevels: candidate level ids per ring.
        self.initArea, self.fishLevels = initArea, fishLevels
        # space/speed: ring spacing and swim speed; counts: fish per ring.
        self.space, self.speed, self.counts = space, speed, counts
class FishArray(fish_array.FishArray):
    """Fish formation no. 3: concentric rings of fish with even spacing.

    Two ring groups are spawned, one entering from the left edge and one from
    the right, each built from four rings of decreasing fish count.
    (Python 2 code: uses ``xrange``.)
    """

    def __init__(self, fishMgr):
        super(FishArray, self).__init__(fishMgr)
        # Base swim speed.
        self.speed = 54
        # Spacing between successive rings (negative => rings overlap).
        self.space = -20
        self.init_areas = [
            # Ring group entering from the left edge.
            FishArrayInit(FishInitArea(Point(0, CENTER_Y), Point(0, CENTER_Y), Point(WIDTH, CENTER_Y), Point(WIDTH, CENTER_Y)), [[3], [7], [12], [16]], self.space, self.speed, [48, 30, 12, 1]),
            # Ring group entering from the right edge.
            FishArrayInit(FishInitArea(Point(WIDTH, CENTER_Y), Point(WIDTH, CENTER_Y), Point(0, CENTER_Y), Point(0, CENTER_Y)), [[3], [7], [12], [17]], self.space, self.speed, [48, 30, 12, 1]),
        ]

    def genFishs(self):
        """Generate the spawn data for every fish of every ring.

        Phase 1 computes, per ring group, the common circle centre and each
        ring's (fish level, radius). Phase 2 places `counts[i]` fish evenly on
        each ring and appends a FishInitData per fish. Also sets
        ``self.duration`` to the longest crossing time plus a lag tolerance.
        """
        self.genFishDatas = []
        # Phase 1: per area, derive the ring centre and (level, radius) pairs.
        centerNLevelNRadius = []
        for area in self.init_areas:
            centerP, direct, endP = area.initArea.getPointNDirect()
            count = len(area.fishLevels)
            levelNedges = []
            for i in xrange(count):
                space = self.space
                level = random.choice(area.fishLevels[i])
                levelData = FISH_LEVELS_DATA[level]
                if i == count - 1:
                    # Innermost ring: step half a body width and stop.
                    centerP = centerP + (-direct) * (levelData.width/2.0)
                    levelNedges.append((level, centerP))
                else:
                    width = levelData.width
                    # Ring-dependent spacing tweaks (second-innermost is
                    # packed tighter but spaced twice as far).
                    if i == count - 2:
                        width = width/4.0
                        space = self.space * 2
                    elif i == 0:
                        space = self.space
                    else:
                        space = self.space / 2
                    # Record the ring edge, then advance towards the centre.
                    centerP = centerP + (-direct) * (width/2.0)
                    levelNedges.append((level, centerP))
                    centerP = centerP + (-direct) * (width/2.0 + space)
            # Radius of each ring = distance from the final centre to its edge.
            levelNRadius = []
            for level, edgeP in levelNedges:
                levelNRadius.append((level, centerP.getDist(edgeP)))
            centerNLevelNRadius.append((centerP, levelNRadius))

        # Phase 2: place the fish on each ring and emit spawn records.
        longestDuration = 0
        for idx, area in enumerate(self.init_areas):
            centerP, levelNRadius = centerNLevelNRadius[idx]
            initP, direct, endP = area.initArea.getPointNDirect()
            # Initial heading angle (degrees) from the travel direction.
            rad = direct.toRadian()
            initRot = math.degrees(rad)
            for i in xrange(len(levelNRadius)):
                level, radius = levelNRadius[i]
                deltaAngle = (math.pi*2)/area.counts[i]
                levelData = FISH_LEVELS_DATA[level]
                # NOTE(review): the inner loop rebinds `i`, shadowing the ring
                # index used above. This works in CPython (the range was
                # already evaluated) but is fragile — worth renaming.
                for i in xrange(area.counts[i]):
                    # Even angular distribution around the ring.
                    # NOTE(review): `rotateSelfByRadian` sounds mutating —
                    # confirm it returns a fresh vector as used here.
                    offsetDir = direct.rotateSelfByRadian(deltaAngle*i).normalize()
                    startP = centerP + (offsetDir*radius)
                    curEndP = Point(endP.x, startP.y) + direct * (levelData.width/2.0)
                    duration = curEndP.getDist(startP)/area.speed
                    # Optimisation: spawn each fish half a body width outside
                    # the screen instead of at its ring position.
                    realStartP = Point(initP.x, startP.y) + (-direct) * (levelData.width/2.0)
                    realDuration = curEndP.getDist(realStartP)/area.speed
                    if duration > longestDuration:
                        longestDuration = duration
                    self.genFishDatas.append(fish_array.FishInitData(
                        0, level, levelData.order, initRot,
                        realStartP.x, realStartP.y, realDuration, levelData.getMulti(), levelData.getPickedRate(), 0,
                        pbAppendRoute([], 0, area.speed, realDuration + TOLERATE_LAG_SECS),
                        fish_array.FISH_ARRAY_APPEAR_TICK + (duration - realDuration)*1000))
        # Formation lifetime: slowest fish plus client-lag tolerance.
        self.duration = longestDuration + TOLERATE_LAG_SECS
        super(FishArray, self).genFishs()
|
[
"you@example.com"
] |
you@example.com
|
b1e07b55cc88e5fa72d84564acadc485d25057d2
|
16c8fdf291430475f40d578b0d64552eb64046e9
|
/colour/models/rgb/transfer_functions/nikon_nlog.py
|
21e0c13e1081c32f15d60dac95893527f306835d
|
[
"BSD-3-Clause"
] |
permissive
|
nodefeet/colour
|
4c1bfed87ce173ff878bdf288fd9828bb68022e3
|
319dd5b1c45aef6983eff1830f918c1e593fb530
|
refs/heads/develop
| 2022-02-19T17:39:36.657993
| 2022-02-15T08:38:26
| 2022-02-15T08:38:26
| 460,456,444
| 0
| 0
|
BSD-3-Clause
| 2022-02-17T13:53:37
| 2022-02-17T13:53:36
| null |
UTF-8
|
Python
| false
| false
| 5,529
|
py
|
"""
Nikon N-Log Log Encoding
========================
Defines the *Nikon N-Log* log encoding:
- :func:`colour.models.log_encoding_NLog`
- :func:`colour.models.log_decoding_NLog`
References
----------
- :cite:`Nikon2018` : Nikon. (2018). N-Log Specification Document - Version
1.0.0 (pp. 1-5). Retrieved September 9, 2019, from
http://download.nikonimglib.com/archive3/hDCmK00m9JDI03RPruD74xpoU905/\
N-Log_Specification_(En)01.pdf
"""
from __future__ import annotations
import numpy as np
from colour.algebra import spow
from colour.hints import (
Boolean,
FloatingOrArrayLike,
FloatingOrNDArray,
Integer,
)
from colour.models.rgb.transfer_functions import full_to_legal, legal_to_full
from colour.utilities import Structure, as_float, from_range_1, to_domain_1
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - http://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"NLOG_CONSTANTS",
"log_encoding_NLog",
"log_decoding_NLog",
]
# Parameters of the two-segment N-Log curve (cube root below the cut,
# logarithm above it) as used by the encoding/decoding functions below.
NLOG_CONSTANTS: Structure = Structure(
    cut1=0.328,  # linear-domain breakpoint (encoding)
    cut2=(452 / 1023),  # code-value breakpoint (decoding)
    a=(650 / 1023),  # gain of the cube-root segment
    b=0.0075,  # offset inside the cube root
    c=(150 / 1023),  # gain of the logarithmic segment
    d=(619 / 1023),  # offset of the logarithmic segment
)
"""*Nikon N-Log* colourspace constants."""
def log_encoding_NLog(
    in_r: FloatingOrArrayLike,
    bit_depth: Integer = 10,
    out_normalised_code_value: Boolean = True,
    in_reflection: Boolean = True,
    constants: Structure = NLOG_CONSTANTS,
) -> FloatingOrNDArray:
    """
    Encode linear reflection data with the *Nikon N-Log* opto-electronic
    transfer function.

    Parameters
    ----------
    in_r
        Linear reflection data :math:`in`, scaled to [0, 1].
    bit_depth
        Bit depth used for conversion.
    out_normalised_code_value
        Whether the returned non-linear data are normalised code values.
    in_reflection
        Whether the light level :math:`in` to a camera is reflection.
    constants
        *Nikon N-Log* constants.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Non-linear data :math:`out`, scaled to [0, 1].

    References
    ----------
    :cite:`Nikon2018`

    Examples
    --------
    >>> log_encoding_NLog(0.18)  # doctest: +ELLIPSIS
    0.3636677...
    """

    lin = to_domain_1(in_r)

    # The curve is specified for reflection values; apply the 0.9 reflection
    # factor when the input is not already reflection.
    if not in_reflection:
        lin = lin * 0.9

    # Two-segment curve: cube root below the breakpoint, logarithm above it.
    encoded = np.where(
        lin < constants.cut1,
        constants.a * spow(lin + constants.b, 1 / 3),
        constants.c * np.log(lin) + constants.d,
    )

    if not out_normalised_code_value:
        encoded = legal_to_full(encoded, bit_depth)

    return as_float(from_range_1(encoded))
def log_decoding_NLog(
    out_r: FloatingOrArrayLike,
    bit_depth: Integer = 10,
    in_normalised_code_value: Boolean = True,
    out_reflection: Boolean = True,
    constants: Structure = NLOG_CONSTANTS,
) -> FloatingOrNDArray:
    """
    Decode *Nikon N-Log* non-linear data with the electro-optical transfer
    function (inverse of :func:`log_encoding_NLog`).

    Parameters
    ----------
    out_r
        Non-linear data :math:`out`, scaled to [0, 1].
    bit_depth
        Bit depth used for conversion.
    in_normalised_code_value
        Whether the given non-linear data are normalised code values.
    out_reflection
        Whether the returned light level is reflection.
    constants
        *Nikon N-Log* constants.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Linear reflection data :math:`in`, scaled to [0, 1].

    References
    ----------
    :cite:`Nikon2018`

    Examples
    --------
    >>> log_decoding_NLog(0.36366777011713869)  # doctest: +ELLIPSIS
    0.1799999...
    """

    code = to_domain_1(out_r)

    if not in_normalised_code_value:
        code = full_to_legal(code, bit_depth)

    # Invert the two-segment curve: cube below the code-value breakpoint,
    # exponential above it.
    lin = np.where(
        code < constants.cut2,
        spow(code / constants.a, 3) - constants.b,
        np.exp((code - constants.d) / constants.c),
    )

    # Undo the reflection factor when focal-plane values are requested.
    if not out_reflection:
        lin = lin / 0.9

    return as_float(from_range_1(lin))
|
[
"thomas.mansencal@gmail.com"
] |
thomas.mansencal@gmail.com
|
7e93265e3e2ddc5409ba10e0b89e8c4bba613615
|
09a2d0231caf5231875270ca85dba3bf201d83c7
|
/linotak/mentions/migrations/0004_incoming_received.py
|
1d50965cd7e957d46d12464362c62a542df6d4ea
|
[] |
no_license
|
pdc/linotak
|
898c2a014a2f2beed25127efc4b69db637c1a537
|
0075ea457f764cbb67acecb584e927bf58d2e7a8
|
refs/heads/develop
| 2023-03-09T19:39:59.013308
| 2022-12-19T19:58:49
| 2022-12-19T19:58:49
| 148,982,795
| 0
| 0
| null | 2023-02-15T20:20:01
| 2018-09-16T09:54:31
|
Python
|
UTF-8
|
Python
| false
| false
| 488
|
py
|
# Generated by Django 3.0.8 on 2020-08-30 21:57
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: add the `Incoming.received` datetime field.

    The `timezone.now` default also backfills existing rows at migrate time.
    """

    dependencies = [
        ("mentions", "0003_post_i18n"),
    ]

    operations = [
        migrations.AddField(
            model_name="incoming",
            name="received",
            field=models.DateTimeField(
                default=django.utils.timezone.now, verbose_name="received"
            ),
        ),
    ]
|
[
"pdc@alleged.org.uk"
] |
pdc@alleged.org.uk
|
575b7b48abf22f076b84e4ef15b11103ba8859b5
|
8cb8bfd2dae516612251039e0632173ea1ea4c8a
|
/modules/analyzes/door/doorsize/controller.py
|
9126c599a165028ae6febf61087f048a2c4fb0a5
|
[] |
no_license
|
nyzsirt/lift-prod
|
563cc70700d26a5812a1bce0bd9795998dce6e99
|
9a5f28e49ad5e80e422a5d5efee77a2d0247aa2b
|
refs/heads/master
| 2020-04-22T01:05:42.262876
| 2019-02-09T13:31:15
| 2019-02-09T13:31:15
| 170,003,361
| 1
| 0
| null | 2019-02-10T17:11:50
| 2019-02-10T17:11:50
| null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
from abstracts.abstract_resource_controller import AbstractResourceController
from modules.analyzes.door.doorsize.models import DoorSize
from modules.helper2 import helper
class ControllerDoorSize(AbstractResourceController):
    """CRUD controller for ``DoorSize`` resources.

    Every operation is forwarded to ``AbstractResourceController``; the bound
    ``super`` proxy is kept on the instance (``self.abstract``) so the
    delegating methods below stay one-liners.
    """

    def __init__(self):
        self.helper = helper()
        # Bound super() proxy used by the delegating methods.
        self.abstract = super(ControllerDoorSize, self)
        self.main_model = DoorSize
        self.default_kwargs = {}

    def get(self, get_args):
        """Fetch resources matching ``get_args``."""
        return self.abstract.get(get_args)

    def create(self, data):
        """Create a new resource from ``data``."""
        return self.abstract.create(data)

    def update(self, mongoid, data):
        """Apply ``data`` to the resource identified by ``mongoid``."""
        return self.abstract.update(mongoid, data)

    def delete(self, mongoid):
        """Remove the resource identified by ``mongoid``."""
        return self.abstract.delete(mongoid)
|
[
"mutlu.erdem@soft-nec.com"
] |
mutlu.erdem@soft-nec.com
|
6edd0fa849079349ad68374f58ae03751169c5a0
|
c4ab9448e4df33cc55db85dbf37afb4982edcab4
|
/isiscb/zotero/migrations/0003_auto_20160218_1614.py
|
33af73f3ff24f0fde46cfe84682b49e4b6333db4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
upconsulting/IsisCB
|
659c1293b3606fb797611e138deb234b41c617f5
|
6c20899bf9193cc4cc6b4a2efea24ae86cbc51eb
|
refs/heads/master
| 2023-09-01T17:23:14.538601
| 2023-03-19T21:05:29
| 2023-03-19T21:05:29
| 40,304,109
| 6
| 2
|
MIT
| 2023-07-30T15:04:05
| 2015-08-06T12:40:51
|
Python
|
UTF-8
|
Python
| false
| false
| 476
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-18 16:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: set `InstanceResolutionEvent.to_instance_id` to
    a CharField with max_length=1000."""

    dependencies = [
        ('zotero', '0002_auto_20160216_1622'),
    ]

    operations = [
        migrations.AlterField(
            model_name='instanceresolutionevent',
            name='to_instance_id',
            field=models.CharField(max_length=1000),
        ),
    ]
|
[
"erick.peirson@asu.edu"
] |
erick.peirson@asu.edu
|
e16cb457cd2bdec0206f1fb45a2bc62bd0834ae8
|
29d09c634ffdd8cab13631d62bc6e3ad00df49bf
|
/Algorithm/baekjoon/13023_ABCDE.py
|
668731e67b568111b79707b4d426bc57551db84d
|
[] |
no_license
|
kim-taewoo/TIL_PUBLIC
|
f1d32c3b4f46344c1c99f02e95cc6d2a888a0374
|
ae86b542f8b1805b5dd103576d6538e3b1f5b9f4
|
refs/heads/master
| 2021-09-12T04:22:52.219301
| 2021-08-28T16:14:11
| 2021-08-28T16:14:11
| 237,408,159
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
# Baekjoon 13023 (ABCDE): decide whether the friendship graph contains a
# simple path of five distinct vertices. Prints 1 if such a path exists,
# else 0. NOTE: reads the whole problem input from stdin when run.

found = False  # global flag, set once a length-5 path has been found
import sys
input = lambda: sys.stdin.readline()  # fast input; shadows the builtin

def dfs(v, cnt, origin):
    """DFS for a simple path of 5 vertices.

    v: current vertex; cnt: vertices on the current path; origin: the path's
    start vertex. Reads/writes the module globals `found`, `board`, `chk`
    and `chk_max` instead of returning a result.
    """
    global found
    if found:
        return
    if cnt == 5:
        found = True
        return
    # Memo: deepest path length reached from this start vertex so far.
    if cnt > chk_max[origin]:
        chk_max[origin] = cnt
    for i in board[v]:
        if not chk[i]:
            # Prune when the best depth recorded for i cannot complete a
            # 5-path. NOTE(review): chk_max is written per-origin but read
            # per-neighbour here — presumably a heuristic bound; confirm.
            if chk_max[i]:
                if cnt + chk_max[i] < 5: continue
            chk[i] = True
            dfs(i, cnt + 1, origin)
            chk[i] = False

n,m = map(int, input().split())
board = [set() for _ in range(n)]  # adjacency sets (undirected)
for i in range(m):
    a, b = map(int, input().split())
    board[a].add(b)
    board[b].add(a)
chk_max = [0 for _ in range(n)]  # pruning memo, see dfs()
chk = [False for _ in range(n)]  # visited marks for the current path
# Try every vertex as the path start until a 5-path is found.
for i in range(n):
    if found:
        break
    chk[i] = True
    dfs(i, 1, i)
    chk[i] = False
if found:
    print(1)
else:
    print(0)
|
[
"acoustic0419@gmail.com"
] |
acoustic0419@gmail.com
|
d4cbaf36b608ec4f75a9244c59fad4ff17838dba
|
620d21623a300821e2a195eed5434bac67fb4dca
|
/abb_experimental_ws/build/abb_irb2400_moveit_config/catkin_generated/pkg.develspace.context.pc.py
|
bad71f63564b6113dd733b4f2835133b33daf66a
|
[] |
no_license
|
yazici/Robarch-dev
|
bb63c04bd2e62386e7c1215bf5b116ccd763c455
|
9f8db4c418db3fc80454200cb6625cc2b2151382
|
refs/heads/master
| 2020-05-21T02:32:06.765535
| 2018-09-07T15:50:26
| 2018-09-07T15:50:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (do not edit by hand); consumed when
# emitting the pkg-config data for this package's devel space.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []  # exported include dirs (none)
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')  # catkin run deps, ';'-list -> space-separated
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []  # exported libraries (none)
PROJECT_NAME = "abb_irb2400_moveit_config"
PROJECT_SPACE_DIR = "/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/devel/.private/abb_irb2400_moveit_config"
PROJECT_VERSION = "1.2.1"
|
[
"email.jrv@gmail.com"
] |
email.jrv@gmail.com
|
137ce9f638c8cbef7f90ef43a198d1e2bbff6f71
|
b1152e66088975211a7b3ae61f68c69630644f4a
|
/MIX_Graph_Multi_Agent/Attention/attention.py
|
c49680b7c0cdad91350c6e900a3da5d1bf04167d
|
[] |
no_license
|
lxjlu/H2G-MAAC
|
ef09a6f838816ff3b47b71781204dcb1fe11a30b
|
144542e098932d61b744bf337b3c6637b358a3b7
|
refs/heads/main
| 2023-06-21T22:26:22.018345
| 2021-07-17T10:38:41
| 2021-07-17T10:38:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,185
|
py
|
import tensorflow as tf
def scaled_dot_product_attention(q, k, v, mask):
    """Compute scaled dot-product attention.

    Args:
      q: queries, shape (..., seq_len_q, depth).
      k: keys, shape (..., seq_len_k, depth).
      v: values, shape (..., seq_len_v, depth_v).
      mask: float tensor broadcastable to (..., seq_len_q, seq_len_k) with 1
        at positions to mask out, or None.

    Returns:
      Tuple of (output (..., seq_len_q, depth_v),
                attention_weights (..., seq_len_q, seq_len_k)).
    """
    # Raw attention scores: QK^T.
    scores = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)

    # Scale by sqrt(d_k) to keep logits in a softmax-friendly range.
    dk = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = scores / tf.math.sqrt(dk)

    # Push masked positions towards -inf so softmax gives them ~0 weight.
    if mask is not None:
        logits += mask * -1e9

    # Normalize over the key axis so each query's weights sum to 1.
    attention_weights = tf.nn.softmax(logits, axis=-1)  # (..., seq_len_q, seq_len_k)

    return tf.matmul(attention_weights, v), attention_weights
# ## Multi-head Attention
# In[ ]:
class MultiHeadAttention(tf.keras.layers.Layer):
    """Transformer-style multi-head attention layer.

    Projects v/k/q to `d_model`, splits each into `num_heads` heads of size
    `d_model // num_heads`, runs scaled dot-product attention per head, then
    concatenates the heads and applies a final linear projection.
    """

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model

        # The model dimension must split evenly across the heads.
        assert d_model % self.num_heads == 0

        self.depth = d_model // self.num_heads

        # Learned projections for queries, keys, values and the output.
        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)

        self.dense = tf.keras.layers.Dense(d_model)

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth).
        Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask=None):
        """Apply multi-head attention.

        Note the argument order (v, k, q): the *first* positional argument is
        the values tensor. Returns (output, attention_weights).
        """
        batch_size = tf.shape(q)[0]

        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)  # (batch_size, seq_len, d_model)
        v = self.wv(v)  # (batch_size, seq_len, d_model)

        q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
        k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
        v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)

        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask)

        # Move heads back next to depth before flattening them together.
        scaled_attention = tf.transpose(scaled_attention,
                                        perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)

        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)

        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)

        return output, attention_weights
def main():
    """Smoke-test MultiHeadAttention with random query/key tensors."""
    mha = MultiHeadAttention(d_model=256, num_heads=4)
    queries = tf.random.uniform((64, 1, 60))  # (batch, seq_len_q, feature)
    keys = tf.random.uniform((64, 10, 60))    # (batch, seq_len_k, feature)
    # First positional argument is the values tensor; reuse the keys for it.
    out, attn = mha(keys, k=keys, q=queries, mask=None)
    print(out.shape, attn.shape)


if __name__ == '__main__':
    main()
|
[
"787873309@qq.com"
] |
787873309@qq.com
|
7f457300765fa9938e6a03720e657ddbc14780bf
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/fip_collab/2017_01_18_predict_bulk_other_regressors/transform.py
|
184cc8d04f8835c28a879cbb63722d039d7d7ed8
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693
| 2017-04-24T19:24:35
| 2017-04-24T19:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
import functions as rr
import numpy as np
# from sklearn.decomposition import PCA
from constants import const
import time
import h5py
def transform(ns, set_id, pca):
    """Project the spatial statistics of `set_id` into PCA space and store them.

    Reads 'ff_<set_id>' from spatial_L<H>.hdf5, writes 'reduced_<set_id>' to
    spatial_reduced_L<H>.hdf5, then logs the PCA reconstruction error and the
    elapsed time via rr.WP.

    Args:
        ns: number of samples in the set (first reshape dimension).
        set_id: string identifier of the sample set.
        pca: fitted decomposition object exposing transform()/inverse_transform().
    """
    st = time.time()

    C = const()
    n_corr = C['cmax']

    # Context managers guarantee both HDF5 files are closed even when an
    # intermediate step raises (the original leaked the handles on error).
    with h5py.File("spatial_L%s.hdf5" % C['H'], 'r') as f_stats:
        ff = f_stats.get('ff_%s' % set_id)[...]
    ff = ff.reshape(ns, n_corr*C['vmax']**3)

    ff_red = pca.transform(ff)

    with h5py.File("spatial_reduced_L%s.hdf5" % C['H'], 'a') as f_red:
        f_red.create_dataset('reduced_%s' % set_id,
                             data=ff_red,
                             dtype='float64')

    """calculate the error incurred in the PCA representation"""
    ff_ = pca.inverse_transform(ff_red)
    err = np.sqrt(np.sum((ff-ff_)**2))/ff.size

    msg = "PCA representation error for %s: %s" % (set_id, err)
    rr.WP(msg, C['wrt_file'])

    timeE = np.round(time.time()-st, 2)
    msg = "transform to low dimensional space, %s: %s s" % (set_id, timeE)
    rr.WP(msg, C['wrt_file'])
if __name__ == '__main__':
    ns = 10
    set_id = 'random'
    # NOTE(review): this entry point looks broken — `reduce` is the builtin
    # (Python 2) or undefined (Python 3), and the signature does not match
    # `transform`, which also needs a fitted `pca`. Presumably it should call
    # transform(ns, set_id, pca) with a PCA fitted elsewhere; confirm.
    reduce(ns, set_id)
|
[
"noahhpaulson@gmail.com"
] |
noahhpaulson@gmail.com
|
350f64bc1b133699e73ca47c6c2a94e3069fb404
|
43ede7b8fb546c00804c0ef94501f6e48ba170d6
|
/Cursos Python/Python 3 - João Batista/Coleções.py
|
85ca78819113eed3dccd3d7d8939d46dd30dbba8
|
[] |
no_license
|
bopopescu/Python-13
|
db407d17252473e78e705e563cfee4dbd316c6b9
|
c8bef500f2d3e4a63d850f96dfa219eff2ecebda
|
refs/heads/master
| 2022-11-22T16:24:08.490879
| 2020-06-11T14:22:24
| 2020-06-11T14:22:24
| 281,830,055
| 0
| 0
| null | 2020-07-23T02:26:31
| 2020-07-23T02:26:30
| null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
# Course scratchpad on Python collections. The list and tuple experiments were
# commented out during the lesson; only the dictionary demo below still runs.
#
# Previously tried here:
#   - building a list, append()/insert(), iterating and indexing it, dir(list)
#   - iterating a tuple and matching a value inside the loop

# Dictionary demo: one value built with a comprehension, one plain string.
dicionario = {'numeros': [num for num in range(10)], 'segunda chave': 'segundo valor '}
print(dicionario.get('numeros'))
|
[
"ofc.erickson@gmail.com"
] |
ofc.erickson@gmail.com
|
b8a111842e89df90bce5aefed525fe7b800362c2
|
b25c4a44e5d33d4d2acf98219b0fbac324bad6c7
|
/Lv0__16_to_30/29.시저 암호.py
|
bc49a0dc8c9ad2fa45459fa03b81bf3f1b5989ab
|
[] |
no_license
|
itwebMJ/algorithmStudy
|
409d46bfa4d0b63d7137a0c64dd09c640dc99460
|
9881e07d276742d243bcd1a4929f726d5ec293ff
|
refs/heads/main
| 2023-07-12T18:08:45.157261
| 2021-08-29T12:45:45
| 2021-08-29T12:45:45
| 375,876,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
'''
Caesar cipher

A Caesar cipher shifts each letter of a sentence by a fixed distance to
another letter. For example, "AB" shifted by 1 becomes "BC", and by 3 becomes
"DE"; "z" shifted by 1 wraps around to "a". Given a string s and a distance n,
complete the function `solution` so that it returns s shifted by n.
'''
def solution(s, n):
    """Caesar-shift every letter of `s` by `n`, preserving case and spaces.

    Letters wrap around the alphabet ('z' + 1 -> 'a'); spaces are kept as-is.
    Any other character is dropped, matching the original behaviour (the
    problem guarantees the input contains only letters and spaces).
    """
    upper = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    lower = upper.lower()

    # Build the pieces and join once instead of quadratic string +=,
    # iterating s directly (the intermediate list copy was redundant).
    shifted = []
    for ch in s:
        if ch in upper:
            shifted.append(upper[(upper.index(ch) + n) % 26])
        elif ch in lower:
            shifted.append(lower[(lower.index(ch) + n) % 26])
        elif ch == ' ':
            shifted.append(' ')
    return ''.join(shifted)
|
[
"rlaalwn61@naver.com"
] |
rlaalwn61@naver.com
|
e8d44fcc8ab38010d57edd595269bffa26a9b608
|
6c791df92e63f8f1ba579c0aa326a7fdc9cfbf19
|
/Recheck/Test_Grad_Descent.py
|
f1b2aac7d945649530f9e92d50d4d83273d3c1a4
|
[] |
no_license
|
krm9c/BDHeterogeneity
|
5261ca874d2923da3d1320ac1b02e23b23e8d64a
|
eabb9bcedbe0cf580c6391eeaf455a675803970a
|
refs/heads/master
| 2020-03-22T10:22:59.038734
| 2019-02-12T22:43:15
| 2019-02-12T22:43:15
| 139,898,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,040
|
py
|
# The test file
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ' '
import Class_Recheck as NN_class
import tensorflow as tf
import numpy as np
import traceback
import random
###################################################################################
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield aligned (inputs, targets) minibatches of exactly `batchsize` rows.

    A trailing partial batch is dropped. With shuffle=True the rows are drawn
    in a random order (one permutation per call, via np.random).
    """
    assert inputs.shape[0] == targets.shape[0]

    order = None
    if shuffle:
        order = np.arange(inputs.shape[0])
        np.random.shuffle(order)

    last_start = inputs.shape[0] - batchsize
    for start in range(0, last_start + 1, batchsize):
        if order is not None:
            sel = order[start:start + batchsize]
        else:
            sel = slice(start, start + batchsize)
        yield inputs[sel], targets[sel]
###################################################################################
def return_dict(placeholder, List, model, batch_x, batch_y, lr):
    """Assemble a TensorFlow feed dict for one training step.

    Pairs each entry of `placeholder` with the matching value in `List`, then
    adds the input batch, its targets and the learning rate under the model's
    well-known placeholders.
    """
    feed = {}
    for idx in range(len(List)):
        feed[placeholder[idx]] = List[idx]
    feed[model.Deep['FL_layer_10']] = batch_x
    feed[model.classifier['Target']] = batch_y
    feed[model.classifier["learning_rate"]] = lr
    return feed
####################################################################################
def sample_Z(X, m, n, kappa):
    """Perturb `X` with unit Gaussian noise of shape (m, n).

    `kappa` is accepted for interface compatibility but is currently unused;
    an earlier uniform-noise variant is kept below for reference.
    """
    # return(X+np.random.uniform(-2, 2, size=[m, n]))
    noise = np.random.normal(0, 1, size=[m, n])
    return X + noise
####################################################################################
def Analyse_custom_Optimizer_GDR_old(X_train, y_train, X_test, y_test, kappa):
    """Train a small network with plain gradient descent and print accuracies.

    Relies on module-level globals (`classes`, `Train_Glob_Iterations`) and the
    project class `NN_class.learners`. Always returns 0; results are printed.
    Python 2 code (xrange, print statements). NOTE(review): the `kappa`
    argument is never used — sample_Z below is called with kappa=0; confirm.
    """
    import gc
    # Lets start with creating a model and then train batch wise.
    model = NN_class.learners()
    depth = []
    depth.append(X_train.shape[1])  # input layer width
    L = [100 for i in xrange(1)]  # one hidden layer of 100 units
    depth.extend(L)
    lr = 0.001
    model = model.init_NN_custom(classes, lr, depth, tf.nn.relu)
    try:
        t = xrange(Train_Glob_Iterations)
        from tqdm import tqdm
        for i in tqdm(t):
            ########### mini-batch learning update
            batch_number = 0
            #for batch in iterate_minibatches(X_train, y_train, Train_batch_size, shuffle=True):
            # 100 random minibatches of 64 samples (sampled without
            # replacement within each batch).
            for k in xrange(100):
                x_batch =[]
                y_batch =[]
                arr = random.sample(range(0, len(X_train)), 64)
                for idx in arr:
                    x_batch.append(X_train[idx])
                    y_batch.append(y_train[idx])
                batch_xs = np.asarray(x_batch)
                batch_ys = np.asarray(y_batch)
                # One gradient-descent weight update on this minibatch.
                model.sess.run([model.Trainer["Weight_op"]],
                               feed_dict={model.Deep['FL_layer_10']: batch_xs,
                                          model.classifier['Target']: batch_ys,
                                          model.classifier["learning_rate"]: lr})
            # Every 20 iterations, report accuracy on (noise-perturbed) test
            # data and on the training data.
            if i%20== 0:
                print "Step", i
                X_test_perturbed = sample_Z(X_test, X_test.shape[0], X_test.shape[1], kappa=0)
                print( "Accuracies",
                       model.sess.run([model.Evaluation['accuracy']],
                                      feed_dict={model.Deep['FL_layer_10']: X_test_perturbed,
                                                 model.classifier['Target']: y_test,
                                                 model.classifier["learning_rate"]: lr}),
                       model.sess.run([ model.Evaluation['accuracy']],
                                      feed_dict={model.Deep['FL_layer_10']: X_train,
                                                 model.classifier['Target']: y_train}) )
            # Dead code kept from an earlier iterate_minibatches-based loop:
            # batch_number = batch_number + 1;
            # batch_xs, batch_ys = batch
            # batch_xs_pert =sample_Z(batch_xs, batch_xs.shape[0], batch_xs.shape[1], kappa=1)0
            # model.sess.run([model.Trainer["Weight_op"]],\
            # feed_dict={model.Deep['FL_layer_10']: batch_xs, model.classifier['Target']: \
            # batch_ys, model.classifier["learning_rate"]:lr})
            # if j % 1 == 0:
            #     print(model.sess.run([model.Evaluation['accuracy'] ],\
            #     feed_dict={model.Deep['FL_layer_10']: X_test, model.classifier['Target']: \
            #     y_test, model.classifier["learning_rate"]:lr}) )
    except Exception as e:
        print("I found an exception", e)
        traceback.print_exc()
    # Tear the TF graph down so repeated calls start from a clean slate.
    tf.reset_default_graph()
    del model
    gc.collect()
    return 0
#######################################################################################################
################################ Parameters and function call##########################################
#######################################################################################################
# Setup the parameters and call the functions
Train_batch_size = 64  # nominal minibatch size (the training loop hard-codes 64)
Train_Glob_Iterations = 501  # outer training iterations
Train_noise_Iterations = 1
from tqdm import tqdm
from tensorflow.examples.tutorials.mnist import input_data
# NOTE(review): `classes` is 4 although MNIST has 10 classes — confirm this is
# deliberate (e.g. a reduced-label experiment).
classes = 4
mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)
X_train = mnist.train.images
X_test = mnist.test.images
y_train = mnist.train.labels
y_test = mnist.test.labels
iterat_kappa = 1
# Random noise scale(s); only the first entry is used below.
Kappa_s = np.random.uniform(0, 1, size=[iterat_kappa])
Analyse_custom_Optimizer_GDR_old(X_train, y_train, X_test, y_test, Kappa_s[0])
|
[
"krm9c@mst.edu"
] |
krm9c@mst.edu
|
d8f8e08efb7e0e9134c5ee0f78f8b256b26822b1
|
eae3d77ac72c168cee7701462f1fc45d7d4dcd91
|
/2115_벌꿀채취.py
|
a196922576c19447850463993fc206b2387b3b9a
|
[] |
no_license
|
ByeongjunCho/Algorithm-TIL
|
ed2f018d50bd2483bd1175ff9bf7e91913c14766
|
ad79125a1498915fe97c1d57ee6860b06c410958
|
refs/heads/master
| 2022-07-19T15:12:23.689319
| 2020-05-18T08:37:09
| 2020-05-18T08:37:09
| 256,399,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,769
|
py
|
# SWEA 2115. [Mock SW competency test] Honey harvesting.
# Two workers each pick one horizontal run of M cells (non-overlapping); within
# a run they may choose any subset of cells whose honey total is <= C, and the
# profit of a subset is the sum of squares of the chosen amounts.
# NOTE: reads the whole problem input from stdin when run.

def makemaxmap():
    """Fill maxMap[i][j] with the best subset profit of the M-cell run starting at (i, j)."""
    for i in range(N):
        for j in range(N-M+1):
            makeMaxSubset(i, j, 0, 0, 0)

# i: row, j: column, cnt: number of cells considered so far
# sum: honey total of the chosen subset (NOTE: shadows the builtin `sum`)
# powSum: profit (sum of squares) of the chosen subset
def makeMaxSubset(i, j, cnt, sum, powSum):
    # Prune: subsets whose honey total exceeds the capacity C are invalid.
    if sum > C:
        return
    if cnt == M:
        # All M cells decided; j has advanced M past the start column j-M.
        if maxMap[i][j-M] < powSum:
            maxMap[i][j - M] = powSum
        return
    # Branch 1: take cell (i, j).
    makeMaxSubset(i, j+1, cnt+1, sum+arr[i][j], powSum+(arr[i][j]**2))
    # Branch 2: skip cell (i, j).
    makeMaxSubset(i, j + 1, cnt + 1, sum, powSum)

def getMaxBenefit():
    """Return the best combined profit of two non-overlapping worker runs."""
    max = 0  # best combined profit (NOTE: shadows the builtin `max`)
    temp = 0
    # 1. Fix worker A's run at (i, j).
    for i in range(N):
        for j in range(N-M+1):
            # 2. Choose worker B's run.
            # 2.1 Same row, starting at least M columns to the right.
            for j2 in range(j+M, N-M+1):
                temp = maxMap[i][j] + maxMap[i][j2]
                if max < temp:
                    max = temp
            # 2.2 Any start position on a later row.
            for i2 in range(i+1, N):
                for j2 in range(N-M+1):
                    temp = maxMap[i][j] + maxMap[i2][j2]
                    if max < temp:
                        max = temp
    return max

T = int(input())
for tc in range(1, T+1):
    N, M, C = map(int, input().split())
    arr = [list(map(int, input().split())) for _ in range(N)]
    maxMap = [[0 for _ in range(N)] for _ in range(N)]
    # 1. Pre-compute, per cell, the best subset profit of its M contiguous cells.
    makemaxmap()
    # 2. Print the best two-worker combination.
    print("#{} {}".format(tc, getMaxBenefit()))
|
[
"jjgk91@naver.com"
] |
jjgk91@naver.com
|
8579ba110a3dfce9e5b310a76a8b8b051ef5e561
|
a8544cedbec52f929e91e617a5f484d987352525
|
/src/tests/src/technique/transitive/test_transitive_calculator.py
|
3947a0e6a65c93529e95fe334898ef0a291bc475
|
[] |
no_license
|
thearod5/Tracer
|
75df513ee2500bffc32c29139983990867239369
|
67ee3d7296fb4c788c111387b335ab9804815655
|
refs/heads/master
| 2023-05-31T13:53:34.640103
| 2021-06-18T01:00:10
| 2021-06-18T01:00:10
| 349,507,401
| 1
| 0
| null | 2021-06-18T01:00:10
| 2021-03-19T17:41:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
import numpy as np
from api.extension.cache import Cache
from api.technique.definitions.direct.calculator import DirectTechniqueData
from api.technique.definitions.transitive.calculator import (
TransitiveTechniqueCalculator,
TransitiveTechniqueData,
append_direct_component_matrices,
perform_transitive_aggregation,
perform_transitive_aggregation_on_component_techniques,
)
from api.technique.variationpoints.aggregation.aggregation_method import (
AggregationMethod,
)
from tests.res.test_technique_helper import SimilarityMatrixMock, TestTechniqueHelper
class TestIntermediateCalculationPipeline(TestTechniqueHelper):
matrices = [
np.array([[0, 1, 0]]),
np.array([[0], [1], [0]]),
np.array([[0, 1], [0, 0]]),
]
"""
IntermediatePipeline
"""
def test_transitive_pipeline(self):
counter_func, counter_dict = self.create_counter_func(
self.get_transitive_definition().get_name()
)
pipeline_funcs = [counter_func, counter_func]
pipeline = TransitiveTechniqueCalculator(
self.get_transitive_definition(), pipeline_funcs
)
pipeline.run_pipeline_on_dataset(self.dataset)
self.assertEqual(len(pipeline_funcs), counter_dict["value"])
"""
IndirectTechniqueCalculator
"""
def test_transitive_technique_calculator_use_case(self):
calculator = TransitiveTechniqueCalculator(self.get_transitive_definition())
technique_data = calculator.calculate_technique_data(self.dataset)
matrix = technique_data.similarity_matrix
self.assertEqual((1, 3), matrix.shape)
"""
calculate_technique_data
"""
def test_calculate_technique_data(self):
original = Cache.CACHE_ON
Cache.CACHE_ON = False
def counter_func(data: DirectTechniqueData):
data.similarity_matrix = SimilarityMatrixMock()
pipeline_funcs = [counter_func]
calculator = TransitiveTechniqueCalculator(
self.get_transitive_definition(), pipeline_funcs
)
technique_data = calculator.calculate_technique_data(self.dataset)
self.assertEqual(self.dataset.name, technique_data.dataset.name)
self.assertEqual(
self.get_transitive_definition().get_name(),
technique_data.technique.get_name(),
)
self.assertIsNotNone(technique_data.similarity_matrix)
Cache.CACHE_ON = original
"""
perform_transitive_aggregation
"""
def test_perform_transitive_aggregation(self):
data = TransitiveTechniqueData(
self.dataset, self.get_traced_transitive_definition()
)
append_direct_component_matrices(data)
perform_transitive_aggregation(data)
self.assertEqual((1, 3), data.similarity_matrix.shape)
self.assertEqual(1, data.similarity_matrix[0][0])
self.assertEqual(0, data.similarity_matrix[0][1])
self.assertEqual(1, data.similarity_matrix[0][2])
"""
perform_transitive_aggregation_on_matrices
"""
def test_perform_transitive_aggregation_on_matrices(self):
result = perform_transitive_aggregation_on_component_techniques(
self.matrices, AggregationMethod.MAX
)
self.assertEqual((1, 2), result.shape)
self.assertEqual(1, result[0][1])
self.assertEqual(1, result.sum(axis=1).sum())
|
[
"vhsalbertorodriguez@gmail.com"
] |
vhsalbertorodriguez@gmail.com
|
75c48e2a11389ce75a618495581d70c80b79da56
|
09e32f424b4f1e54709b7bc483023c273c28b559
|
/w4/lucyflowers/lucyflowers.py
|
eb2fcb9e72ab8e75a0375018f68a92fae82f3361
|
[] |
no_license
|
lenin2ud/hackerrank
|
6d4283e4d86ad0778ad19f85f5cb8d4d7d98bff6
|
7996542d87c32c6fffdc0d005dea92b57a18cd26
|
refs/heads/master
| 2021-03-27T04:30:32.732800
| 2014-06-13T00:10:30
| 2014-06-13T00:10:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
# https://www.hackerrank.com/contests/w4/challenges/lucy-and-flowers
MOD = 10 ** 9 + 9
MAX_N = 5000
dp = [1] * (MAX_N + 1)
inv = [0] * (MAX_N + 1)
mem = [0] * (MAX_N + 1)
def extended_gcd(aa, bb):
lastremainder, remainder = abs(aa), abs(bb)
x, lastx, y, lasty = 0, 1, 1, 0
while remainder:
lastremainder, (quotient, remainder) = remainder, divmod(lastremainder, remainder)
x, lastx = lastx - quotient*x, x
y, lasty = lasty - quotient*y, y
return lastremainder, lastx * (-1 if aa < 0 else 1), lasty * (-1 if bb < 0 else 1)
def modinv(a, m):
g, x, y = extended_gcd(a, m)
if g != 1:
raise ValueError
return x % m
def precompute(n):
for i in range(1, n + 1):
r = 0
for j in range(i):
r += (dp[j] * dp[i - j - 1]) % MOD
dp[i] = r % MOD
for i in range(1, MAX_N + 1):
inv[i] = modinv(i, MOD)
def solve(n):
if mem[n] != 0:
return mem[n]
result = 0
comb = 1
for i in range(1, n + 1):
comb = (comb * (n - i + 1) * inv[i]) % MOD
result = (result + dp[i] * comb) % MOD
mem[n] = result
return result
if __name__ == '__main__':
precompute(MAX_N)
T = int(raw_input())
for t in range(T):
n = int(raw_input())
print solve(n)
|
[
"marek@stepniowski.com"
] |
marek@stepniowski.com
|
3289fda5e1f37200848fba7f873c3f84f00ef0cf
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/automation/v20170515preview/get_source_control.py
|
bd8cc3a9b818a03ca55f93a52a7bcfd1acd42b85
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,646
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSourceControlResult',
'AwaitableGetSourceControlResult',
'get_source_control',
]
@pulumi.output_type
class GetSourceControlResult:
"""
Definition of the source control.
"""
def __init__(__self__, auto_sync=None, branch=None, creation_time=None, description=None, folder_path=None, id=None, last_modified_time=None, name=None, publish_runbook=None, repo_url=None, source_type=None, type=None):
if auto_sync and not isinstance(auto_sync, bool):
raise TypeError("Expected argument 'auto_sync' to be a bool")
pulumi.set(__self__, "auto_sync", auto_sync)
if branch and not isinstance(branch, str):
raise TypeError("Expected argument 'branch' to be a str")
pulumi.set(__self__, "branch", branch)
if creation_time and not isinstance(creation_time, str):
raise TypeError("Expected argument 'creation_time' to be a str")
pulumi.set(__self__, "creation_time", creation_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if folder_path and not isinstance(folder_path, str):
raise TypeError("Expected argument 'folder_path' to be a str")
pulumi.set(__self__, "folder_path", folder_path)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_modified_time and not isinstance(last_modified_time, str):
raise TypeError("Expected argument 'last_modified_time' to be a str")
pulumi.set(__self__, "last_modified_time", last_modified_time)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if publish_runbook and not isinstance(publish_runbook, bool):
raise TypeError("Expected argument 'publish_runbook' to be a bool")
pulumi.set(__self__, "publish_runbook", publish_runbook)
if repo_url and not isinstance(repo_url, str):
raise TypeError("Expected argument 'repo_url' to be a str")
pulumi.set(__self__, "repo_url", repo_url)
if source_type and not isinstance(source_type, str):
raise TypeError("Expected argument 'source_type' to be a str")
pulumi.set(__self__, "source_type", source_type)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="autoSync")
def auto_sync(self) -> Optional[bool]:
"""
The auto sync of the source control. Default is false.
"""
return pulumi.get(self, "auto_sync")
@property
@pulumi.getter
def branch(self) -> Optional[str]:
"""
The repo branch of the source control. Include branch as empty string for VsoTfvc.
"""
return pulumi.get(self, "branch")
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> Optional[str]:
"""
The creation time.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="folderPath")
def folder_path(self) -> Optional[str]:
"""
The folder path of the source control.
"""
return pulumi.get(self, "folder_path")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> Optional[str]:
"""
The last modified time.
"""
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publishRunbook")
def publish_runbook(self) -> Optional[bool]:
"""
The auto publish of the source control. Default is true.
"""
return pulumi.get(self, "publish_runbook")
@property
@pulumi.getter(name="repoUrl")
def repo_url(self) -> Optional[str]:
"""
The repo url of the source control.
"""
return pulumi.get(self, "repo_url")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> Optional[str]:
"""
The source type. Must be one of VsoGit, VsoTfvc, GitHub.
"""
return pulumi.get(self, "source_type")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetSourceControlResult(GetSourceControlResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSourceControlResult(
auto_sync=self.auto_sync,
branch=self.branch,
creation_time=self.creation_time,
description=self.description,
folder_path=self.folder_path,
id=self.id,
last_modified_time=self.last_modified_time,
name=self.name,
publish_runbook=self.publish_runbook,
repo_url=self.repo_url,
source_type=self.source_type,
type=self.type)
def get_source_control(automation_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
source_control_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSourceControlResult:
"""
Definition of the source control.
:param str automation_account_name: The name of the automation account.
:param str resource_group_name: Name of an Azure Resource group.
:param str source_control_name: The name of source control.
"""
__args__ = dict()
__args__['automationAccountName'] = automation_account_name
__args__['resourceGroupName'] = resource_group_name
__args__['sourceControlName'] = source_control_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:automation/v20170515preview:getSourceControl', __args__, opts=opts, typ=GetSourceControlResult).value
return AwaitableGetSourceControlResult(
auto_sync=__ret__.auto_sync,
branch=__ret__.branch,
creation_time=__ret__.creation_time,
description=__ret__.description,
folder_path=__ret__.folder_path,
id=__ret__.id,
last_modified_time=__ret__.last_modified_time,
name=__ret__.name,
publish_runbook=__ret__.publish_runbook,
repo_url=__ret__.repo_url,
source_type=__ret__.source_type,
type=__ret__.type)
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
fd8475f2fa48aed730dfe0df90c3253f83661d75
|
5d61565651b7ba5fa8fade3313a5e82fca8b6686
|
/goodstype/forms.py
|
bab317474e9e54bac606219efd097ee9de0e8a3c
|
[] |
no_license
|
lonelyxmas/ISMS
|
d597b00072bfa77907875f575b866fbb1fb53295
|
08c5e2f3518fc639cf1a1f2869f4b2f3ae58e306
|
refs/heads/master
| 2023-08-14T12:02:59.001215
| 2021-03-22T03:34:58
| 2021-03-22T03:34:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,013
|
py
|
from django.forms import ModelForm
from django import forms
from django.forms import widgets as Fwidgets
from .models import *
class GoosTypeModelForm(ModelForm):
class Meta:
model = goodstype
fields = '__all__'
widgets = {
'FID': Fwidgets.Input(attrs={'type': 'hidden'}),
'FGoodsTypeID': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FGoodsType': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FDeviationType': Fwidgets.Select(attrs={'lay-verify': 'required'}),
'FPositiveDeviation': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FNegativeDeviation': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FDesc': Fwidgets.Input(attrs={'class': 'layui-input', 'autocomplete': 'off'})
}
class SubTypeModelForm(ModelForm):
FPID = forms.ChoiceField(widget=forms.Select(attrs={'lay-verify': 'required', 'disabled': 'disabled'}), required=False)
class Meta:
model = goodstype
fields = '__all__'
widgets = {
'FID': Fwidgets.Input(attrs={'type': 'hidden'}),
'FGoodsTypeID': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FGoodsType': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FDeviationType': Fwidgets.Select(attrs={'lay-verify': 'required'}),
'FPositiveDeviation': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FNegativeDeviation': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FDesc': Fwidgets.Input(attrs={'class': 'layui-input', 'autocomplete': 'off'})
}
|
[
"11325818@qq.com"
] |
11325818@qq.com
|
7821a32281f931b75f37a80413b9ec794a2804e3
|
1377e0c1b2200f5130b149ff77cf0fda5a1d2aa9
|
/src/pmr2/client/script.py
|
6cbae7a5414bf1624b23dd63f667976f1e80867c
|
[] |
no_license
|
alan-wu/pmr2.client
|
c97ef8371be931da281eba33082d649ce65e1201
|
3dc6afa008159acaa5b8bde4b291920ea3eceb3d
|
refs/heads/master
| 2020-04-04T21:19:54.317615
| 2014-09-01T02:50:32
| 2014-09-01T02:50:52
| 156,282,171
| 0
| 0
| null | 2018-11-05T20:58:31
| 2018-11-05T20:58:30
| null |
UTF-8
|
Python
| false
| false
| 7,941
|
py
|
import os.path
import traceback
import json
import code
import pdb
import webbrowser
from urllib import quote_plus
from requests_oauthlib.oauth1_session import TokenRequestDenied
try:
import readline
except ImportError:
pass
from pmr2.client import Client
from pmr2.client import DemoAuthClient
HOME = os.path.expanduser('~')
CONFIG_FILENAME = os.path.join(HOME, '.pmr2clirc')
PMR2ROOT = 'http://staging.physiomeproject.org'
CONSUMER_KEY = 'ovYoqjlJLrpCcEWcIFyxtqRS'
CONSUMER_SECRET = 'fHssEYMWZzgo6JWUBh4l1bhd'
DEFAULT_SCOPE = quote_plus(
'http://staging.physiomeproject.org/oauth_scope/collection,'
'http://staging.physiomeproject.org/oauth_scope/search,'
'http://staging.physiomeproject.org/oauth_scope/workspace_tempauth,'
'http://staging.physiomeproject.org/oauth_scope/workspace_full'
)
class Cli(object):
token_key = ''
token_secret = ''
active = False
state = None
_debug = 0
last_response = None
def __init__(self,
site=PMR2ROOT,
consumer_key=CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET,
scope=DEFAULT_SCOPE,
):
self.auth_client = DemoAuthClient(site, consumer_key, consumer_secret)
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, value):
if isinstance(value, int):
self._debug = value
if isinstance(value, basestring):
if value.lower() in ('false', 'no', '0',):
self._debug = 0
else:
self._debug = 1
def build_config(self):
return {
'token_key':
self.auth_client.session._client.client.resource_owner_key,
'token_secret':
self.auth_client.session._client.client.resource_owner_secret,
'debug': self.debug,
'scope': DEFAULT_SCOPE,
}
def load_config(self, filename=CONFIG_FILENAME):
try:
with open(filename, 'r') as fd:
config = json.load(fd)
except IOError:
print("Fail to open configuration file.")
config = self.build_config()
except ValueError:
print("Fail to decode JSON configuration. Using default values.")
config = self.build_config()
token = config.get('token_key', '')
secret = config.get('token_secret', '')
self.auth_client.session._client.client.resource_owner_key = token
self.auth_client.session._client.client.resource_owner_secret = secret
self.debug = config.get('debug', 0)
self.scope = config.get('scope', DEFAULT_SCOPE)
return token and secret
def save_config(self, filename=CONFIG_FILENAME):
try:
with open(filename, 'wb') as fd:
json.dump(self.build_config(), fd)
except IOError:
print("Error saving configuration")
def get_access(self):
# get user to generate one.
try:
self.auth_client.fetch_request_token(scope=self.scope)
except Exception as e:
print('Fail to request temporary credentials.')
return
target = self.auth_client.authorization_url()
webbrowser.open(target)
verifier = raw_input('Please enter the verifier: ')
self.auth_client.set_verifier(verifier=verifier)
token = self.auth_client.fetch_access_token()
return True
def do_help(self, arg=''):
"""
Print this message.
"""
print('Basic demo commands:')
print('')
for name in sorted(dir(self)):
if not name.startswith('do_'):
continue
obj = getattr(self, name)
if not callable(obj):
continue
print(name[3:])
print(obj.__doc__)
def do_console(self, arg=''):
"""
Start the interactive python console.
"""
console = code.InteractiveConsole(locals=locals())
result = console.interact('')
def do_dashboard(self, arg=''):
"""
List out the features available on the dashboard.
"""
dashboard = self.client(endpoint='dashboard')
if not arg:
for k, v in dashboard.value().items():
print('%s\t%s\t%s' % (k, v['label'], v['target']))
return
self.state = dashboard.get(arg)
print('Acquired state "%s"; use console to interact.') % arg
def do_list_workspace(self, arg=''):
"""
Returns a list of workspaces within your private workspace
container.
"""
dashboard = self.client(endpoint='dashboard')
state = dashboard.get('workspace-home')
for i in state.value():
print('"%s"\t%s' % (i['title'], i['target']))
def do_raw(self, arg=''):
"""
Open a target URL to receive raw API output.
"""
a = arg.split(None, 1)
url = ''.join(a[:1])
data = ''.join(a[1:])
if not url:
print("URL is required.")
return
if not data:
self.state = self.client(url)
else:
self.state = self.client(url, data=data)
print(self.client.last_response.json())
def do_property(self, arg=''):
"""
Set property for this object.
"""
permitted = ['debug']
a = arg.split()
if len(a) < 1:
print("need both key and values.")
return
args = list(arg.split())
prop = args.pop(0)
if len(a) < 2:
print('%s = %s') % (prop, getattr(self, prop))
return
if prop not in permitted:
print("'%s' cannot be set") % prop
return
setattr(self, prop, ' '.join(args))
def shell(self):
while self.active:
try:
raw = raw_input('pmr2cli> ')
if not raw:
continue
rawargs = raw.split(None, 1)
command = rawargs.pop(0)
obj = getattr(self, 'do_' + command, None)
if callable(obj):
obj(*rawargs)
else:
print("Invalid command, try 'help'.")
except EOFError:
self.active = False
print('')
except KeyboardInterrupt:
print('\nGot interrupt signal.')
self.active = False
except ValueError:
print("Couldn't decode json.")
# print("Status was %d") % self.last_response.status_code
print("Use console to check `self.last_response` for details.")
except:
print(traceback.format_exc())
if self.debug:
pdb.post_mortem()
def run(self):
access = self.load_config()
if not access:
try:
access = self.get_access()
except TokenRequestDenied as e:
print('Fail to validate the verifier.')
if not access:
self.save_config()
return
self.client = Client(PMR2ROOT,
session=self.auth_client.session, use_default_headers=True)
try:
self.client()
except ValueError as e:
# JSON decoding error
print('Credentials are invalid and are purged. Quitting')
self.auth_client.session._client.client.resource_owner_key = ''
self.auth_client.session._client.client.resource_owner_secret = ''
self.scope = DEFAULT_SCOPE
self.save_config()
return
self.active = True
print('Starting PMR2 Demo Shell...')
self.save_config()
self.shell()
if __name__ == '__main__':
cli = Cli()
cli.run()
|
[
"tommy.yu@auckland.ac.nz"
] |
tommy.yu@auckland.ac.nz
|
f5e09da800b6be9e3ad3cd52937aa943b1c2ee6d
|
f087d996fd8164dc4fcf9b312533e51bd42029ae
|
/products/urls.py
|
cf8edd5c4698904055f1302df72e09cd2c83a3fe
|
[] |
no_license
|
Mohamed-Kudratov/Furniture_store
|
364abc300a3c00b999d54e45badfc7c8ca998e90
|
98754515937c1d7d934a75f0fe6e5f600a69b5e4
|
refs/heads/main
| 2023-07-18T04:40:16.770467
| 2021-08-30T15:31:16
| 2021-08-30T15:31:16
| 399,190,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from django.urls import path
from products.views import ProductListView
app_name = 'products'
urlpatterns = [
path('', ProductListView.as_view(), name='list')
]
|
[
"you@example.com"
] |
you@example.com
|
75ccd59f1058f4fa224b44f4ba3b6b7670f1bca4
|
c8c77f6cc6c032daf179ea2138e4dda5473b426b
|
/s3/s3-python-example-download-file.py
|
44226fc6b8e67109cfe37b2a8ae8611f54e25ad1
|
[] |
no_license
|
arunmastermind/AWS-examples-using-BOTO3
|
b411a6c96011ab58a66952a53fa2938cb58d5135
|
e8390094374c10902bab016a21caba75ea179b5a
|
refs/heads/master
| 2020-09-30T13:34:33.657621
| 2019-12-11T12:37:44
| 2019-12-11T12:37:44
| 227,297,211
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
import boto3
import botocore
BUCKET_NAME = 'my-bucket' # replace with your bucket name
KEY = 'my_image_in_s3.jpg' # replace with your object key
s3 = boto3.resource('s3')
try:
s3.Bucket(BUCKET_NAME).download_file(KEY, 'my_local_image.jpg')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
|
[
"arunmastermind.sci@gmail.com"
] |
arunmastermind.sci@gmail.com
|
9def36becf9665b78190a6e896d8622be917634c
|
4668b8330bb287eef380f990cce3d076bf9456df
|
/venv/lib/python3.6/site-packages/ray/__init__.py
|
eb02bacfc63ecab548d282500c5d067bd6463a88
|
[] |
no_license
|
Ali-Khakpash/redis-flask-training
|
1f7bb1745f224c752bbdb338f4bb4da5ad65f3fb
|
1d5a59a97486e734cb7b08ddb40c8aaeddd429d8
|
refs/heads/master
| 2020-11-25T10:19:25.553265
| 2020-03-12T19:03:36
| 2020-03-12T19:03:36
| 228,612,175
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,271
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from os.path import dirname
import sys
# MUST add pickle5 to the import path because it will be imported by some
# raylet modules.
if "pickle5" in sys.modules:
raise ImportError("Ray must be imported before pickle5 because Ray "
"requires a specific version of pickle5 (which is "
"packaged along with Ray).")
# Add the directory containing pickle5 to the Python path so that we find the
# pickle5 version packaged with ray and not a pre-existing pickle5.
pickle5_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "pickle5_files")
sys.path.insert(0, pickle5_path)
# Expose ray ABI symbols which may be dependent by other shared
# libraries such as _streaming.so. See BUILD.bazel:_raylet
so_path = os.path.join(dirname(__file__), "_raylet.so")
if os.path.exists(so_path):
import ctypes
from ctypes import CDLL
CDLL(so_path, ctypes.RTLD_GLOBAL)
# MUST import ray._raylet before pyarrow to initialize some global variables.
# It seems the library related to memory allocation in pyarrow will destroy the
# initialization of grpc if we import pyarrow at first.
# NOTE(JoeyJiang): See https://github.com/ray-project/ray/issues/5219 for more
# details.
import ray._raylet # noqa: E402
if "pyarrow" in sys.modules:
raise ImportError("Ray must be imported before pyarrow because Ray "
"requires a specific version of pyarrow (which is "
"packaged along with Ray).")
# Add the directory containing pyarrow to the Python path so that we find the
# pyarrow version packaged with ray and not a pre-existing pyarrow.
pyarrow_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "pyarrow_files")
sys.path.insert(0, pyarrow_path)
# See https://github.com/ray-project/ray/issues/131.
helpful_message = """
If you are using Anaconda, try fixing this problem by running:
conda install libgcc
"""
try:
import pyarrow # noqa: F401
# pyarrow is not imported inside of _raylet because of the issue described
# above. In order for Cython to compile _raylet, pyarrow is set to None
# in _raylet instead, so we give _raylet a real reference to it here.
# We first do the attribute checks here so that building the documentation
# succeeds without fully installing ray..
# TODO(edoakes): Fix this.
if hasattr(ray, "_raylet") and hasattr(ray._raylet, "pyarrow"):
ray._raylet.pyarrow = pyarrow
except ImportError as e:
if ((hasattr(e, "msg") and isinstance(e.msg, str)
and ("libstdc++" in e.msg or "CXX" in e.msg))):
# This code path should be taken with Python 3.
e.msg += helpful_message
elif (hasattr(e, "message") and isinstance(e.message, str)
and ("libstdc++" in e.message or "CXX" in e.message)):
# This code path should be taken with Python 2.
condition = (hasattr(e, "args") and isinstance(e.args, tuple)
and len(e.args) == 1 and isinstance(e.args[0], str))
if condition:
e.args = (e.args[0] + helpful_message, )
else:
if not hasattr(e, "args"):
e.args = ()
elif not isinstance(e.args, tuple):
e.args = (e.args, )
e.args += (helpful_message, )
raise
from ray._raylet import (
ActorCheckpointID,
ActorClassID,
ActorID,
ClientID,
Config as _Config,
JobID,
WorkerID,
FunctionID,
ObjectID,
TaskID,
UniqueID,
) # noqa: E402
_config = _Config()
from ray.profiling import profile # noqa: E402
from ray.state import (global_state, jobs, nodes, tasks, objects, timeline,
object_transfer_timeline, cluster_resources,
available_resources, errors) # noqa: E402
from ray.worker import (
LOCAL_MODE,
SCRIPT_MODE,
WORKER_MODE,
connect,
disconnect,
get,
get_gpu_ids,
get_resource_ids,
get_webui_url,
init,
is_initialized,
put,
register_custom_serializer,
remote,
shutdown,
wait,
) # noqa: E402
import ray.internal # noqa: E402
import ray.projects # noqa: E402
# We import ray.actor because some code is run in actor.py which initializes
# some functions in the worker.
import ray.actor # noqa: F401
from ray.actor import method # noqa: E402
from ray.runtime_context import _get_runtime_context # noqa: E402
# Ray version string.
__version__ = "0.8.0"
__all__ = [
"global_state",
"jobs",
"nodes",
"tasks",
"objects",
"timeline",
"object_transfer_timeline",
"cluster_resources",
"available_resources",
"errors",
"LOCAL_MODE",
"PYTHON_MODE",
"SCRIPT_MODE",
"WORKER_MODE",
"__version__",
"_config",
"_get_runtime_context",
"actor",
"connect",
"disconnect",
"get",
"get_gpu_ids",
"get_resource_ids",
"get_webui_url",
"init",
"internal",
"is_initialized",
"method",
"profile",
"projects",
"put",
"register_custom_serializer",
"remote",
"shutdown",
"wait",
]
# ID types
__all__ += [
"ActorCheckpointID",
"ActorClassID",
"ActorID",
"ClientID",
"JobID",
"WorkerID",
"FunctionID",
"ObjectID",
"TaskID",
"UniqueID",
]
import ctypes # noqa: E402
# Windows only
if hasattr(ctypes, "windll"):
# Makes sure that all child processes die when we die. Also makes sure that
# fatal crashes result in process termination rather than an error dialog
# (the latter is annoying since we have a lot of processes). This is done
# by associating all child processes with a "job" object that imposes this
# behavior.
(lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501
|
[
"ali.khakpash@gmail.com"
] |
ali.khakpash@gmail.com
|
f5d74b47c47767172a0a4f417aabf004bcfcd977
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_192/ch50_2020_03_31_18_24_04_548537.py
|
160206f489154d64f31662ca60f92d5e9b86c3d2
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
def junta_nome_sobrenome(nome, sobrenome):
n_s = []
espaco = [' ']*len(n)
i = 0
while i < len(n):
n_s.append(n[i]) = espaco[i] + s[i]
i += 1
print(n_s)
|
[
"you@example.com"
] |
you@example.com
|
d83403b3e62411169dc322e3b39e4f5ae49837ef
|
dbcef3da83c75c61542c85cfb02dd2b97d5316b5
|
/016 3Sum Closest/3Sum-Closest.py
|
f35017735b3e80d7a98e0f12802a93dacd5e1931
|
[] |
no_license
|
wecoderBao/own-leetcode-solution-python
|
bbf3efad162f542f510293e614bbbadf67dcd899
|
ef1760df16d2e298162a33a2ab27a537f8527446
|
refs/heads/master
| 2021-01-24T10:52:41.404740
| 2018-03-26T03:34:04
| 2018-03-26T03:34:04
| 123,067,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,007
|
py
|
"""
Given an array S of n integers, find three integers in S such that the sum is closest to a given number, target.
Return the sum of the three integers. You may assume that each input would have exactly one solution.
For example, given array S = {-1 2 1 -4}, and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
"""
class Solution:
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
def twoSumClosest(nums, start, sum2):
end = len(nums) - 1
distance = abs(sum2 - nums[start] - nums[end])
ans = nums[start] + nums[end]
while start < end:
if nums[start] + nums[end] == sum2:
ans = nums[start] + nums[end]
break
elif nums[start] + nums[end] > sum2:
if abs(sum2 - nums[start] - nums[end]) < distance:
distance = abs(sum2 - nums[start] - nums[end])
ans = nums[start] + nums[end]
end -= 1
elif nums[start] + nums[end] < sum2:
if abs(sum2 - nums[start] - nums[end]) < distance:
distance = abs(sum2 - nums[start] - nums[end])
ans = nums[start] + nums[end]
start += 1
return ans
nums.sort()
result = nums[0] + nums[1] + nums[2]
# float("inf")正无穷 float("-inf")负无穷
distance = float("inf")
for i in range(len(nums)):
if i + 1 < len(nums) - 1:
sum3 = nums[i] + twoSumClosest(nums, i + 1, target - nums[i])
if abs(target - sum3) < distance:
distance = abs(target - sum3)
result = sum3
return result
if __name__ == '__main__':
arr = [-3,-2,-5,3,-4]
s = Solution()
print(s.threeSumClosest(arr, -1))
|
[
"sunbao@corp.netease.com"
] |
sunbao@corp.netease.com
|
b23881cd3ec3b09bc5fbeeb2a6134e6300077f74
|
524baf7de05bd3fc5b9d08083cbb0b7b47a67979
|
/66.py
|
7ea2f8d6d49eac49fd8935d35ebcf0323fa8d74d
|
[] |
no_license
|
gk90731/100-questions-practice
|
1356dd577516567a5c51a4257f59fe01b123e7ff
|
f855549e037b9924dd6f0370dc2f2a53765d9227
|
refs/heads/master
| 2020-05-25T14:04:59.642819
| 2019-05-21T12:49:04
| 2019-05-21T12:49:04
| 187,835,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
"""Question: Create an English to Portuguese translation program.
The program takes a word from the user as input and translates it using the following dictionary as a vocabulary source.
d = dict(weather = "clima", earth = "terra", rain = "chuva")
Expected output:
Enter word:
earth
terra"""
###########################################################################
d = dict(weather = "clima", earth = "terra", rain = "chuva")
def vocabulary(word):
return d[word]
word = input("Enter word: ")
print(vocabulary(word))
|
[
"gk90731@gmail.com"
] |
gk90731@gmail.com
|
6bd0239951439edd729d4ce1d71d7ea2d4fbd1ad
|
97e0064a13111eef4709a0b865e58cf9d8804cc1
|
/restore_expense.py
|
8a1372de0f0fdfc5f069b0ca1fd295e00234f914
|
[] |
no_license
|
c1xfr2e/kky_stuff
|
ee9dc03f985b405b79388b385543747ad490f3aa
|
47dc5aafeb8710bebd1486d5a7aff7f669ea94ce
|
refs/heads/master
| 2021-05-28T23:02:11.676345
| 2015-07-02T10:28:24
| 2015-07-02T10:28:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,600
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'zh'
__date__ = '6/30/15'
from pymongo import MongoClient
from bson import ObjectId
import pickle
import datetime
import time
import sys
import logging
import tablib
import xlrd
import decimal
client = MongoClient('mongodb://sa:kuaikuaiyu1219@123.56.131.68:7900/admin')
db_wukong = client['wukong-release']
db_console = client['console-release']
c_courier = db_wukong['courier']
c_log = db_console['log']
c_expense = db_wukong['expend']
c_withdraw = db_wukong['withdraw']
start_dt = datetime.datetime(2015, 6, 28)
end_dt = datetime.datetime(2015, 6, 30, 14)
start_timestamp = int(time.mktime(start_dt.timetuple()) * 1000)
end_timestamp = int(time.mktime(end_dt.timetuple()) * 1000)
'''
unfreeze_logs = list(c_log.find(
{
'action': 'courier_account',
'arguments.freeze': 'unfreeze',
'created_time': {
'$gte': start_timestamp,
'$lt': end_timestamp
}
}
))
unfreeze_courier_ids = [ ObjectId(log['arguments']['id'][0]) for log in unfreeze_logs]
headers = (
'速递员ID',
'速递员所属校区',
'速递员姓名',
'速递员手机号'
)
couriers = list(c_courier.find(
{
'_id': { '$in': unfreeze_courier_ids }
}
))
lines = []
for c in couriers:
line = (
str(c['_id']),
c.get('school', ''),
c.get('name', ''),
c.get('mobile', '')
)
lines.append(line)
data = tablib.Dataset(*lines, headers=headers)
with open('couriers.xls', 'wb') as f:
f.write(data.xls)
bad_expense = list(c_expense.find(
{
'courier_id': { '$in': unfreeze_courier_ids },
'status': { '$in': ['unprocessed', 'freezed'] }
}
))
'''
bad_expense = list(c_expense.find(
{
'status': { '$in': ['freezed'] }
}
))
bad_withdraw_ids = []
bad_expense_ids = []
for expense in bad_expense:
fine_amount = expense['fine_amount']
if fine_amount > 0:
result = c_courier.update(
{ '_id': expense['courier_id'] },
{
'$inc': {
'debt': int(fine_amount)
}
}
)
print result
bad_withdraw_ids.append(expense['withdraw_id'])
bad_expense_ids.append(expense['_id'])
result = c_withdraw.update(
{ '_id': { '$in': bad_withdraw_ids} },
{
'$set': {
'status': 'unprocessed',
'unfreezed_time': int(time.time() * 1000)
}
},
multi=True
)
print result
result = c_expense.remove(
{ '_id': { '$in': bad_expense_ids } }
)
print result
|
[
"gurity@163.com"
] |
gurity@163.com
|
1de1dd49bfdc0892c65112b7ef0032830fb8ab54
|
564d6a4d305a8ac6a7e01c761831fb2081c02d0f
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_azure_firewall_fqdn_tags_operations.py
|
2a9ee6dd7a76b016f22f8b6f45c0eb67eb0302ba
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
paultaiton/azure-sdk-for-python
|
69af4d889bac8012b38f5b7e8108707be679b472
|
d435a1a25fd6097454b7fdfbbdefd53e05029160
|
refs/heads/master
| 2023-01-30T16:15:10.647335
| 2020-11-14T01:09:50
| 2020-11-14T01:09:50
| 283,343,691
| 0
| 0
|
MIT
| 2020-07-28T22:43:43
| 2020-07-28T22:43:43
| null |
UTF-8
|
Python
| false
| false
| 5,081
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallFqdnTagsOperations:
"""AzureFirewallFqdnTagsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_all(
self,
**kwargs
) -> AsyncIterable["models.AzureFirewallFqdnTagListResult"]:
"""Gets all the Azure Firewall FQDN Tags in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallFqdnTagListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.AzureFirewallFqdnTagListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureFirewallFqdnTagListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallFqdnTagListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewallFqdnTags'} # type: ignore
|
[
"noreply@github.com"
] |
paultaiton.noreply@github.com
|
30119909b42166146f8b4dccfc70438638f747a2
|
cfa3f958c8b4c7f8617731c6580c16e8daee6218
|
/board/todos/models.py
|
1d2a1417eac0374b90f6398deeed334911f10766
|
[] |
no_license
|
cdh3261/Django
|
dd01f9c07c8b501c95445748e5d590565ca68352
|
0003b617ae500cf191e4af5cc8ab5fd06f02f76e
|
refs/heads/master
| 2022-12-22T23:12:41.271650
| 2019-11-04T07:57:46
| 2019-11-04T07:57:46
| 217,967,586
| 0
| 0
| null | 2022-11-22T04:46:53
| 2019-10-28T04:43:09
|
Python
|
UTF-8
|
Python
| false
| false
| 238
|
py
|
from django.db import models
# Create your models here.
class Todo(models.Model):
title = models.CharField(max_length=50)
content = models.TextField()
due_date = models.DateField()
author = models.CharField(max_length=50)
|
[
"cdh3261@naver.com"
] |
cdh3261@naver.com
|
31556e2ff279b0f2bc83581d282addea9f319f6a
|
a2dc75a80398dee58c49fa00759ac99cfefeea36
|
/bluebottle/activities/migrations/0043_auto_20210420_0847.py
|
1832ca60d59f7222957c239e0285e3bbb6f24d90
|
[
"BSD-2-Clause"
] |
permissive
|
onepercentclub/bluebottle
|
e38b0df2218772adf9febb8c6e25a2937889acc0
|
2b5f3562584137c8c9f5392265db1ab8ee8acf75
|
refs/heads/master
| 2023-08-29T14:01:50.565314
| 2023-08-24T11:18:58
| 2023-08-24T11:18:58
| 13,149,527
| 15
| 9
|
BSD-3-Clause
| 2023-09-13T10:46:20
| 2013-09-27T12:09:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,330
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-04-20 06:47
from __future__ import unicode_literals
from django.db import migrations, connection
def create_activity_view(apps, schema_editor):
sql = """
DROP VIEW IF EXISTS activities;
CREATE VIEW activities AS
SELECT ct.model::text AS activity_type,
ac.title,
ac.id,
ac.status,
ac.created,
ac.updated
FROM {0}.activities_activity ac
LEFT JOIN {0}.time_based_dateactivity da ON da.timebasedactivity_ptr_id = ac.id
LEFT JOIN {0}.time_based_periodactivity pa ON pa.timebasedactivity_ptr_id = ac.id
LEFT JOIN {0}.funding_funding fu ON fu.activity_ptr_id = ac.id
LEFT JOIN {0}.deeds_deed de ON de.activity_ptr_id = ac.id
JOIN {0}.django_content_type ct ON ac.polymorphic_ctype_id = ct.id;
""".format(connection.tenant.schema_name)
if connection.tenant.schema_name != 'public':
schema_editor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('activities', '0042_effortcontribution_contribution_type'),
('deeds', '0007_auto_20210222_1644')
]
operations = [
migrations.RunPython(create_activity_view, migrations.RunPython.noop)
]
|
[
"loek@goodup.com"
] |
loek@goodup.com
|
988702a78c19d40f847900e6fd1f3b46d60d54af
|
86ed811106eecf7aa3a15cf98537ef274b811ad7
|
/headmasters/migrations/0009_headmasterprofile_crop_url.py
|
1cdccb88138baab74eb6d4ab65b1dfaaa7729e12
|
[] |
no_license
|
SaifulAbir/Django-MIS
|
934ad39beff62f0e1cbe9377738b780122989662
|
d680a0a64211bc9cd7748364454c52b16398ea5c
|
refs/heads/master
| 2022-10-19T11:57:46.087577
| 2020-02-03T10:10:08
| 2020-02-03T10:10:08
| 271,542,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
# Generated by Django 2.2.4 on 2019-10-22 06:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('headmasters', '0008_auto_20190930_1246'),
]
operations = [
migrations.AddField(
model_name='headmasterprofile',
name='crop_url',
field=models.TextField(blank=True, default='', null=True),
),
]
|
[
"rashed@ishraak.com"
] |
rashed@ishraak.com
|
de894f519d533dd6183e61c9dd8f23315fa88388
|
652e6171022bb844102e191e9459e73ff2d7901b
|
/tests/optimizations/HardImports_2.py
|
8b513f9b738753c890d6b6be85033361b2fdc0ce
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
pombredanne/Nuitka
|
e07ee1ba2c027c25e4feebc9751bbb0c1cb338b1
|
02e8d59a275cd7fe482cbc8100e753ff5abe39d7
|
refs/heads/develop
| 2022-03-16T23:55:49.295972
| 2022-02-20T14:28:23
| 2022-02-20T14:28:23
| 69,127,861
| 0
| 0
| null | 2016-09-24T21:10:20
| 2016-09-24T21:10:20
| null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
def sysOptionalAttribute():
return sys.maxint, sys.subversion
|
[
"kay.hayen@gmail.com"
] |
kay.hayen@gmail.com
|
df189a233dc0b05d92ae76eda7e06be7f66882b2
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/41/usersdata/112/24387/submittedfiles/gravitacional.py
|
888bc07241bd9b1b980d67c03c96c8be908f635c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import funcoes
#ENTRADA
dimensao = input('Digite a dimensao das matrizes: ')
matrizA = raw_input('Digite a Matriz A como uma única linha entre aspas: ')
matrizD = raw_input('Digite a Matriz D como uma única linha entre aspas: ')
alfa = input('Digite o valor de alfa: ')
#PREPARANDO A ENTRADA
T = np.zeros((dimensao,dimensao))
A = np.fromstring(matrizA, sep=' ').reshape(dimensao, dimensao)
d = np.fromstring(matrizD, sep=' ').reshape(dimensao, dimensao)
#comece aqui...
#INÍCIO
#SAÍDA
somatorio = sum(sum(T))
print('%.4f' % somatorio)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1ef5a65135c034f3e78359e2d7b635ff06eb63f5
|
a884039e1a8b0ab516b80c2186e0e3bad28d5147
|
/Livros/Livro-Introdução à Programação-Python/Capitulo 10/Exemplos/nome.py
|
08708c5df927089c0f4d9b9c55738c7715ea25a2
|
[
"MIT"
] |
permissive
|
ramonvaleriano/python-
|
6e744e8bcd58d07f05cd31d42a5092e58091e9f0
|
ada70918e945e8f2d3b59555e9ccc35cf0178dbd
|
refs/heads/main
| 2023-04-10T14:04:24.497256
| 2021-04-22T18:49:11
| 2021-04-22T18:49:11
| 340,360,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
# Program: nome.py
# Author: Ramon R. Valeriano
# Decription:
# Developed: 28/02/2020 - 11:28
class Nome:
def __init__(self, nome):
if nome == None or not nome.strip():
raise ValueError('Nome não pode ser nulo nem em branco.')
self.nome = nome
self.chave = nome.strip().lower()
def __str__(self):
return self.nome
def __repr__(self):
return f'<Class {type(self).__name__} em 0x{id(self):x} Nome: {self.nome}'
def __eq__(self, outro):
print('__eq__ Chamado')
return self.nome == outro
def __lt__(self, outro):
print('__lt__ Chamado')
return self.nome < outro
|
[
"rrvaleriano@gmail.com"
] |
rrvaleriano@gmail.com
|
d9abb2e4a97bc4acab4889f0068a81752db2542f
|
2486e0cc147230a5d69c6d052217b9f3c5a4d1a8
|
/Bindings/Python/setup.py.in
|
d3da821e0d0ecbd48f6d71c80c0154b247cb4a75
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bit20090138/opensim-core
|
8b68e13a2e5e0e538651c3f7940d8bed7a8a4fe3
|
de812be879d7271be92d71ac01c689a3b29e4629
|
refs/heads/master
| 2021-01-18T05:13:41.479462
| 2016-04-29T00:12:56
| 2016-04-29T00:12:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
in
|
#!/usr/bin/env python
import os
from setuptools import setup
setup(name='opensim',
version='@OPENSIM_VERSION@',
description='OpenSim Simulation Framework',
author='OpenSim Team',
author_email='ahabib@stanford.edu',
url='http://opensim.stanford.edu/',
license='Apache 2.0',
packages=['opensim'],
package_data={'opensim': ['_*.*']},
include_package_data=True,
classifiers=[
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Physics',
],
)
|
[
"cld72@cornell.edu"
] |
cld72@cornell.edu
|
9513a2411bfa39e1bbf4be4a084440f59c0b600b
|
a752920841038f1f84df06779ff041d6c1100697
|
/pypinyin/contrib/neutral_tone.pyi
|
774407edb8eff8fa0f780e5f8c4d3300f2351d42
|
[
"MIT"
] |
permissive
|
mozillazg/python-pinyin
|
06e5eaa5326b642d50aacbe71b7117ac6024b353
|
6a306a6ec0148502ae4e689a229340555ecb6333
|
refs/heads/master
| 2023-08-31T14:13:44.512972
| 2023-05-14T12:18:47
| 2023-05-14T12:18:47
| 12,830,126
| 4,564
| 634
|
MIT
| 2023-09-09T03:46:41
| 2013-09-14T14:01:40
|
Python
|
UTF-8
|
Python
| false
| false
| 518
|
pyi
|
# -*- coding: utf-8 -*-
from typing import Any
from typing import Optional
from typing import Text
from typing import Tuple
from pypinyin.constants import Style
TStyle = Style
class NeutralToneWith5Mixin(object):
NUMBER_TONE = ... # type: Tuple[TStyle]
NUMBER_AT_END = ... # type: Tuple[TStyle]
def post_convert_style(self, han: Text, orig_pinyin: Text,
converted_pinyin: Text, style: TStyle,
strict: bool, **kwargs: Any) -> Optional[Text]: ...
|
[
"mozillazg101@gmail.com"
] |
mozillazg101@gmail.com
|
860fc2572ff197af9c82e05aa40b80bb2e6c03c2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02889/s701044419.py
|
15a300e4e766231cfc0aae20a07a2905af0d690c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
import sys
input = sys.stdin.readline
def calc(N, g):
for k in range(N):
for i in range(N):
for j in range(N):
g[i][j] = min(g[i][j], g[i][k] + g[k][j])
def main():
N, M, L = map(int, input().split())
adj = [{} for _ in range(N)]
for _ in range(M):
A, B, C = map(int, input().split())
adj[A-1][B-1] = C
adj[B-1][A-1] = C
dst = [[float("inf")] * N for _ in range(N)]
for i in range(N):
dst[i][i] = 0
for j in adj[i]:
dst[i][j] = adj[i][j]
calc(N, dst)
ans = [[float("inf")] * N for _ in range(N)]
for i in range(N):
ans[i][i] = 0
for j in range(i+1, N):
if dst[i][j] <= L:
ans[i][j] = 1
ans[j][i] = 1
calc(N, ans)
Q = int(input())
for _ in range(Q):
s, t = map(int, input().split())
x = ans[s-1][t-1]
print(-1 if x == float("inf") else x-1)
if __name__ == "__main__":
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.