blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3fb2e07f62201caffa8b67a78f4e24fe0fe44f69
|
0d178d54334ddb7d669d212b11dd23ef5607cf8e
|
/LeetCode/Array/4Sum.py
|
11f7a1109fd6bbfb0bdb4c287a979f1a7fa60b2f
|
[] |
no_license
|
mrunalhirve12/Python_CTCI-practise
|
2851d2c61fd59c76d047bd63bd591849c0781dda
|
f41348fd7da3b7af9f9b2df7c01457c7bed8ce0c
|
refs/heads/master
| 2020-04-17T11:09:29.213922
| 2019-09-28T02:36:24
| 2019-09-28T02:36:24
| 166,529,867
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
"""
Given an array nums of n integers and an integer target, are there elements a, b, c, and d in nums such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
Note:
The solution set must not contain duplicate quadruplets.
Example:
Given array nums = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
"""
class Solution(object):
    def fourSum(self, nums, target):
        """
        Return all unique quadruplets in ``nums`` that sum to ``target``.

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]] -- sorted list of sorted quadruplets
        """
        # Sort once, fix the two smallest indices, then close in on the
        # remaining pair with two pointers from both ends.
        values = sorted(nums)
        count = len(values)
        quads = set()  # set of tuples dedupes identical quadruplets
        for first in range(count - 3):
            for second in range(first + 1, count - 2):
                needed = target - values[first] - values[second]
                lo, hi = second + 1, count - 1
                while lo < hi:
                    pair_sum = values[lo] + values[hi]
                    if pair_sum < needed:
                        lo += 1
                    elif pair_sum > needed:
                        hi -= 1
                    else:
                        quads.add((values[first], values[second], values[lo], values[hi]))
                        lo += 1
        # sorted() also converts the set of tuples into a list
        return sorted(map(list, quads))
# Quick manual check using the example from the problem statement above.
s = Solution()
print(s.fourSum([1, 0, -1, 0, -2, 2], 0))
|
[
"mrunalhirve@gmail.com"
] |
mrunalhirve@gmail.com
|
99d474d6de01788f9f44e8db380fcd8057be8c85
|
2e996d6870424205bc6af7dabe8685be9b7f1e56
|
/code/processing/20190325_r3_O3_IND_titration_flow/file_rename.py
|
51d27275b95739132c62e7ef1b063c6806355426
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
minghao2016/mwc_mutants
|
fd705d44e57e3b2370d15467f31af0ee3945dcc2
|
0f89b3920c6f7a8956f48874615fd1977891e33c
|
refs/heads/master
| 2023-03-25T03:56:33.199379
| 2020-06-26T20:09:00
| 2020-06-26T20:09:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
# -*- coding: utf-8 -*-
"""Rename raw .fcs flow-cytometry files from one run to a descriptive
scheme and dump each file's event data to CSV."""
import numpy as np
import fcsparser
import os
import glob

# Define the details of the experiment.
USERNAME = 'gchure'
DATE = 20190325
RUN_NO = 3
FCS_PATTERN = 'RP2019-03-25'  # filename prefix shared by all .fcs files of this run
savedir = '../../../data/flow/csv/'  # destination directory for the CSV dumps

# Define the order of rows and the cols.
# R / ROWS / OPS are indexed per plate row (strain); COLS per plate column.
R = (0, 0, 260, 260, 260, 260)
ROWS = ('auto', 'delta', 'F164T', 'Q294V', 'Q294K', 'Q294R')
OPS = ('NA', 'O3', 'O3', 'O3', 'O3', 'O3')
COLS = (0, 0.1, 5, 10, 25, 50, 75, 100, 250, 500, 1000, 5000)

# Get the names of the files
files = glob.glob('../../../data/flow/fcs/{0}*r{1}*.fcs'.format(FCS_PATTERN, RUN_NO))
# lexicographic sort; assumed to match plate acquisition order -- TODO confirm
files = np.sort(files)

# Break the list up into columns (groups of len(ROWS) consecutive files).
ncols, nrows = len(COLS), len(ROWS)
col_groups = [files[i:i + nrows] for i in range(0, len(files), nrows)]
for i, col in enumerate(col_groups):
    for j, samp in enumerate(col):
        # Define the new name: date_run_operator_repressorCount_strain_concentration.
        name = '{0}_r{1}_{2}_R{3}_{4}_{5}uMIPTG'.format(
            DATE, RUN_NO, OPS[j], R[j], ROWS[j], COLS[i])
        # Load the file using fcsparser and save the event table to csv.
        _, data = fcsparser.parse(samp)
        data.to_csv('{0}{1}.csv'.format(savedir, name))
        # Rename the fcs file in place to the same scheme.
        os.rename(samp, '../../../data/flow/fcs/{0}.fcs'.format(name))
|
[
"gchure@caltech.edu"
] |
gchure@caltech.edu
|
79a91e47db28a01386fb815a32b47a218c215852
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/tyk2_input/31/31-46_MD_NVT_rerun/set_7.py
|
83bf1b35f537aee7c2dd8f6127d9919cfeab9ce4
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
import os

# Per-lambda setup for a thermodynamic-integration (TI) rerun: for each
# lambda window, copy the production-input and PBS templates into the
# window's directory and substitute the placeholder 'XXX' with the
# window's lambda value via sed.
dir = '/mnt/scratch/songlin3/run/tyk2/L31/MD_NVT_rerun/ti_one-step/31_46/'  # NOTE(review): shadows builtin dir()
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_7.in'  # production MD input template
temp_pbs = filesdir + 'temp_7.pbs'  # PBS job-script template
# The 12 lambda windows; spacing presumably from a quadrature scheme -- TODO confirm.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Each window directory is named by its lambda value, e.g. '0.00922'.
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    # prodin: copy the template, then replace XXX with this lambda
    prodin = workdir + "%6.5f_prod_7.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    # PBS: same substitution for the job script
    pbs = workdir + "%6.5f_7.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # submit pbs (disabled; uncomment to queue the jobs)
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
b24b3b508692c9d3bbffa96ff99acdc158a53fa4
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2007.1/desktop/kde/base/kdesdk/actions.py
|
ab36d22af81fe87c89ebff98566184311e00fa96
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import kde
def setup():
    """Regenerate the autotools files, then run kdesdk's configure."""
    autotools.make("-f admin/Makefile.common")
    # Build with subversion support against Berkeley DB 4.2.
    kde.configure("--with-subversion \
                   --with-berkeley-db \
                   --with-db-name=db-4.2 \
                   --with-db-include-dir=/usr/include/db4.2")
def build():
    """Compile the package."""
    kde.make()
def install():
    """Install the build artifacts into the package image."""
    kde.install()
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
db0afec86c62701b4b6b347de2fe3cb745f7d55f
|
ef32b87973a8dc08ba46bf03c5601548675de649
|
/pytglib/api/functions/get_chat_sponsored_message.py
|
71868b22788dde705d4134cc9c51f27345d2e10d
|
[
"MIT"
] |
permissive
|
iTeam-co/pytglib
|
1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721
|
d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5
|
refs/heads/master
| 2022-07-26T09:17:08.622398
| 2022-07-14T11:24:22
| 2022-07-14T11:24:22
| 178,060,880
| 10
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
from ..utils import Object
class GetChatSponsoredMessage(Object):
    """
    Returns sponsored message to be shown in a chat; for channel chats only. Returns a 404 error if there is no sponsored message in the chat

    Attributes:
        ID (:obj:`str`): ``GetChatSponsoredMessage``

    Args:
        chat_id (:obj:`int`):
            Identifier of the chat

    Returns:
        SponsoredMessage

    Raises:
        :class:`telegram.Error`
    """
    # Wire-format type identifier used by the TDLib JSON interface.
    ID = "getChatSponsoredMessage"

    def __init__(self, chat_id, extra=None, **kwargs):
        # extra: opaque request-correlation data; **kwargs is accepted but
        # ignored here -- presumably for forward compatibility, TODO confirm.
        self.extra = extra
        self.chat_id = chat_id  # int

    @staticmethod
    def read(q: dict, *args) -> "GetChatSponsoredMessage":
        # Rebuild the request object from a decoded TDLib dict.
        # NOTE(review): 'extra' is not restored here -- confirm intentional.
        chat_id = q.get('chat_id')
        return GetChatSponsoredMessage(chat_id)
|
[
"arshshia@gmail.com"
] |
arshshia@gmail.com
|
fc77eaf0993fe68fe4b3692b3b0971b77c561865
|
8bb6fad924eae0aa03e36e70816ab9659131c190
|
/test/account_test.py
|
47ce554ce9c49f948983a15223a1f0369c55b25b
|
[
"MIT"
] |
permissive
|
birkin/illiad3_client
|
98c6f2200a24b140dc1a489692a16d552554d402
|
d9dc3a1dbdc9b4c3181111eedc02867ab0d59088
|
refs/heads/master
| 2020-12-03T04:01:20.922533
| 2018-07-13T13:06:20
| 2018-07-13T13:06:20
| 95,804,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,263
|
py
|
import os, sys, pprint, unittest
## add project parent-directory to sys.path
parent_working_dir = os.path.abspath( os.path.join(os.getcwd(), os.pardir) )
sys.path.append( parent_working_dir )
from illiad3_client.illiad3.account import IlliadSession
class AccountTest(unittest.TestCase):
    """Integration tests for IlliadSession against a live ILLiad server.

    Endpoint and credentials come from ILLIAD_MODULE__TEST_* environment
    variables; every test performs real network calls.
    """

    def setUp(self):
        # Fails fast with KeyError if a required env var is missing.
        self.ILLIAD_REMOTE_AUTH_URL = os.environ['ILLIAD_MODULE__TEST_REMOTE_AUTH_URL']
        self.ILLIAD_REMOTE_AUTH_KEY = os.environ['ILLIAD_MODULE__TEST_REMOTE_AUTH_KEY']
        self.ILLIAD_USERNAME = os.environ['ILLIAD_MODULE__TEST_USERNAME']
        self.ill = IlliadSession(
            self.ILLIAD_REMOTE_AUTH_URL, self.ILLIAD_REMOTE_AUTH_KEY, self.ILLIAD_USERNAME )

    def tearDown(self):
        # Always log out so server-side sessions don't accumulate.
        self.ill.logout()

    def test_login(self):
        """ Tests that login returns session, authentication and registration info. """
        login_resp_dct = self.ill.login()
        self.assertTrue( 'session_id' in login_resp_dct.keys() )
        self.assertTrue( 'authenticated' in login_resp_dct.keys() )
        self.assertTrue( 'registered' in login_resp_dct.keys() )
        self.assertTrue( login_resp_dct['authenticated'] )

    ## submit_key tests ##

    def test_submit_key(self):
        """ Tests submit_key on article openurl. """
        ill = self.ill
        ill.login()
        # Url encoded
        openurl = "rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.spage=538&rft.issue=5&rft.date=2010-02-11&rft.volume=16&url_ver=Z39.88-2004&rft.atitle=Targeting+%CE%B17+Nicotinic+Acetylcholine+Receptors+in+the+Treatment+of+Schizophrenia.&rft.jtitle=Current+pharmaceutical+design&rft.issn=1381-6128&rft.genre=article"
        submit_key = ill.get_request_key(openurl)
        self.assertEqual(submit_key['ILLiadForm'],
            'ArticleRequest')
        self.assertEqual(submit_key['PhotoJournalTitle'],
            'Current pharmaceutical design')

    def test_book(self):
        """ Tests submit_key on simple book openurl (includes a note). """
        ill = self.ill
        ill.login()
        # notes param carries url-encoded UTF-8 to exercise unicode handling
        openurl = "sid=FirstSearch:WorldCat&genre=book&isbn=9780231122375&title=Mahatma%20Gandhi%20%3A%20nonviolent%20power%20in%20action&date=2000&rft.genre=book&notes=%E2%80%9Ci%C3%B1t%C3%ABrn%C3%A2ti%C3%B8n%C3%A0l%C4%ADz%C3%A6ti%D0%A4n%E2%80%9D"
        submit_key = ill.get_request_key(openurl)
        self.assertEqual( 'LoanRequest', submit_key['ILLiadForm'] )
        self.assertEqual( 'Mahatma Gandhi : nonviolent power in action', submit_key['LoanTitle'] )
        self.assertEqual( 'LoanRequest', submit_key['ILLiadForm'] )
        self.assertEqual( '“iñtërnâtiønàlĭzætiФn”', submit_key['Notes'] )
        self.assertEqual(
            ['CitedIn', 'ILLiadForm', 'ISSN', 'LoanDate', 'LoanTitle', 'NotWantedAfter', 'Notes', 'SearchType', 'SessionID', 'SubmitButton', 'Username', 'blocked', 'errors'],
            sorted(submit_key.keys()) )

    def test_book_with_long_openurl(self):
        """ Tests submit_key on long book openurl. """
        ill = self.ill
        ill.login()
        openurl = 'sid=FirstSearch%3AWorldCat&genre=book&isbn=9784883195732&title=Shin+kanzen+masuta%CC%84.+Nihongo+no%CC%84ryoku+shiken&date=2011&aulast=Fukuoka&aufirst=Rieko&id=doi%3A&pid=858811926%3Cfssessid%3E0%3C%2Ffssessid%3E%3Cedition%3EShohan.%3C%2Fedition%3E&url_ver=Z39.88-2004&rfr_id=info%3Asid%2Ffirstsearch.oclc.org%3AWorldCat&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&req_dat=%3Csessionid%3E0%3C%2Fsessionid%3E&rfe_dat=%3Caccessionnumber%3E858811926%3C%2Faccessionnumber%3E&rft_id=info%3Aoclcnum%2F858811926&rft_id=urn%3AISBN%3A9784883195732&rft.aulast=Fukuoka&rft.aufirst=Rieko&rft.btitle=Shin+kanzen+masuta%CC%84.+Nihongo+no%CC%84ryoku+shiken&rft.date=2011&rft.isbn=9784883195732&rft.place=To%CC%84kyo%CC%84&rft.pub=Suri%CC%84e%CC%84+Nettowa%CC%84ku&rft.edition=Shohan.&rft.genre=book'
        submit_key = ill.get_request_key( openurl )
        self.assertEqual(
            'LoanRequest', submit_key['ILLiadForm'] )
        self.assertEqual(
            ['CitedIn', 'ESPNumber', 'ILLiadForm', 'ISSN', 'LoanAuthor', 'LoanDate', 'LoanEdition', 'LoanPlace', 'LoanPublisher', 'LoanTitle', 'NotWantedAfter', 'SearchType', 'SessionID', 'SubmitButton', 'Username', 'blocked', 'errors'],
            sorted(submit_key.keys()) )

    def test_bookitem(self):
        """ Tests submit_key on genre=bookitem openurl. """
        ill = self.ill
        ill.login()
        openurl = 'url_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.btitle=Current%20Protocols%20in%20Immunology&rft.atitle=Isolation%20and%20Functional%20Analysis%20of%20Neutrophils&rft.date=2001-05-01&rft.isbn=9780471142737&rfr_id=info%3Asid%2Fwiley.com%3AOnlineLibrary'
        submit_key = ill.get_request_key( openurl )
        self.assertEqual(
            'BookChapterRequest', submit_key['ILLiadForm'] )
        self.assertEqual(
            ['CitedIn', 'ILLiadForm', 'ISSN', 'NotWantedAfter', 'PhotoArticleTitle', 'PhotoJournalInclusivePages', 'PhotoJournalTitle', 'PhotoJournalYear', 'SearchType', 'SessionID', 'SubmitButton', 'Username', 'blocked', 'errors'],
            sorted(submit_key.keys()) )

    def test_tiny_openurl(self):
        """ Tests submit_key on painfully minimalist openurl. """
        ill = self.ill
        ill.login()
        openurl = 'sid=Entrez:PubMed&id=pmid:23671965'
        submit_key = ill.get_request_key( openurl )
        self.assertEqual(
            'LoanRequest', submit_key['ILLiadForm'] )
        self.assertEqual(
            ['CitedIn', 'ILLiadForm', 'LoanDate', 'LoanTitle', 'NotWantedAfter', 'Notes', 'SearchType', 'SessionID', 'SubmitButton', 'Username', 'blocked', 'errors'],
            sorted(submit_key.keys()) )
        # With no title available, the whole openurl is echoed into Notes.
        self.assertEqual(
            'entire openurl: `sid=Entrez:PubMed&id=pmid:23671965`', submit_key['Notes'] )

    def test_logout(self):
        """ Tests logout. """
        response_dct = self.ill.logout()
        self.assertTrue( 'authenticated' in response_dct.keys() )
        self.assertFalse(response_dct['authenticated'])
def suite():
    """Build a TestSuite of AccountTest's test_* methods.

    ``unittest.makeSuite`` was deprecated in Python 3.11 and removed in
    3.13; ``TestLoader.loadTestsFromTestCase`` is the supported
    replacement and uses the same default ``test`` method prefix.
    """
    return unittest.TestLoader().loadTestsFromTestCase(AccountTest)


if __name__ == '__main__':
    unittest.main()
|
[
"birkin.diana@gmail.com"
] |
birkin.diana@gmail.com
|
f6ebb3862bcfeae9cb815cf8f6f75caf7ece1cbf
|
c4a57dced2f1ed5fd5bac6de620e993a6250ca97
|
/huaxin/huaxin_ui/ui_android_xjb_2_0/register_page.py
|
f36c00fdfd14245afe93d9b85d7c54953dbe4ae2
|
[] |
no_license
|
wanglili1703/firewill
|
f1b287b90afddfe4f31ec063ff0bd5802068be4f
|
1996f4c01b22b9aec3ae1e243d683af626eb76b8
|
refs/heads/master
| 2020-05-24T07:51:12.612678
| 2019-05-17T07:38:08
| 2019-05-17T07:38:08
| 187,169,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,679
|
py
|
# coding: utf-8
from _common.page_object import PageObject
from _common.xjb_decorator import gesture_close_afterwards, user_info_close_afterwards, robot_log
from _tools.mysql_xjb_tools import MysqlXjbTools
from huaxin_ui.ui_android_xjb_2_0.binding_card_page import BindingCardPage
import huaxin_ui.ui_android_xjb_2_0.home_page
# Android UI locators ('xpath_' prefix is consumed by the PageObject layer).
PHONE_NUMBER = "xpath_//android.widget.EditText[@text='请输入手机号码']"  # phone-number input
GET_VERIFICATION_CODE = "xpath_//android.widget.Button[@text='获取验证码']"  # request SMS code
VERIFICATION_CODE_INPUT = "xpath_//android.widget.EditText[@text='请输入验证码']"  # SMS code input
PASSWORD = "xpath_//android.widget.EditText[@resource-id='com.shhxzq.xjb:id/register_pwd']"  # login password
LOGIN_PASSWORD_CONFIRM = "xpath_//android.widget.Button[@text='注册']"  # 'register' button
BINDING_CARD = "xpath_//android.widget.Button[@text='绑定银行卡']"  # 'bind bank card' button
SHOPPING_FIRST = "xpath_//android.widget.TextView[@text='先逛逛']"  # 'browse first' (skip binding)
TRADE_PASSWORD = "xpath_//android.widget.EditText[@resource-id='com.shhxzq.xjb:id/tradepwd_et']"  # trade password input
TRADE_PASSWORD_CONFIRM = "xpath_//android.widget.Button[@text='下一步']"  # 'next' button

# Elements that must exist for this page; empty means no presence check.
current_page = []
class RegisterPage(PageObject):
    """Page object for the Android XJB 2.0 registration screen."""

    def __init__(self, web_driver):
        super(RegisterPage, self).__init__(web_driver)
        # current_page is empty, so this currently asserts nothing. NOTE(review)
        self.elements_exist(*current_page)
        self._db = MysqlXjbTools()

    @user_info_close_afterwards
    @gesture_close_afterwards
    def register(self, phone_number, login_password):
        """Register a new account, skip card binding, and return the HomePage."""
        self.perform_actions(
            PHONE_NUMBER, phone_number,
            GET_VERIFICATION_CODE,
            PASSWORD, login_password,
        )
        # Read the SMS code straight from the backing database instead of a device.
        verification_code = MysqlXjbTools().get_sms_verify_code(mobile=phone_number, template_id='cif_register')
        self.perform_actions(
            VERIFICATION_CODE_INPUT, verification_code,
            LOGIN_PASSWORD_CONFIRM,
            SHOPPING_FIRST,
        )
        page = huaxin_ui.ui_android_xjb_2_0.home_page.HomePage(self.web_driver)
        return page

    @robot_log
    def register_binding_card(self, phone_number, login_password, trade_password):
        """Register a new account, then bind a bank card; returns BindingCardPage."""
        self.perform_actions(PHONE_NUMBER, phone_number,
                             GET_VERIFICATION_CODE,
                             PASSWORD, login_password)
        verification_code = MysqlXjbTools().get_sms_verify_code(mobile=phone_number, template_id='cif_register')
        self.perform_actions(VERIFICATION_CODE_INPUT, verification_code, )
        self.perform_actions(
            LOGIN_PASSWORD_CONFIRM,
            BINDING_CARD,
            # the trade password is entered twice: set, then confirm
            TRADE_PASSWORD, trade_password,
            TRADE_PASSWORD, trade_password,
            TRADE_PASSWORD_CONFIRM,
        )
        page = BindingCardPage(self.web_driver)
        return page
|
[
"wanglili@shhxzq.com"
] |
wanglili@shhxzq.com
|
9f51a684b8c7951a2e4fc7e6f2705499041116ae
|
8f7a30fd1c4d70535ba253d6e442576944fdfd7c
|
/Topics/Magic methods/10 puppies/main.py
|
e444a74a24c6ddca7f787232073b25a34c423935
|
[] |
no_license
|
TogrulAga/Coffee-Machine
|
9596c3d8ef1b7347d189249f20602b584d8842e3
|
f065de747bd1b626e4e5a06fac68202e41b6c11e
|
refs/heads/master
| 2023-04-11T20:54:21.710264
| 2021-05-09T23:01:48
| 2021-05-09T23:01:48
| 365,864,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
class Puppy:
    """A class that allows at most ten instances to ever be created.

    Once the class-level counter reaches ten, further construction
    attempts yield ``None`` instead of a new instance.
    """

    n_puppies = 0  # number of created puppies (class-wide)

    def __new__(cls):
        # Allocate only while the cap has not been reached.
        if cls.n_puppies < 10:
            cls.n_puppies += 1
            return super().__new__(cls)
        return None
|
[
"toghrul.aghakishiyev@ericsson.com"
] |
toghrul.aghakishiyev@ericsson.com
|
5b8468dad0ffc2610646ee99a9814491cbdeb199
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/eve/client/script/ui/eveUIProcs.py
|
ea6ae5bc59cf6e80cb3020348a440d2d503d85e2
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 3,969
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\eveUIProcs.py
import uthread
import eve.common.script.sys.eveCfg as util
import locks
import random
import svc
import carbonui.const as uiconst
import localization
class EveUIProcSvc(svc.uiProcSvc):
    """UI-proc service override mapping ActionObject callback keys to UI commands."""
    __guid__ = 'svc.eveUIProcSvc'
    __replaceservice__ = 'uiProcSvc'  # replaces the base uiProcSvc registration
    __startupdependencies__ = ['cmd']

    def Run(self, *args):
        # Build the callbackKey -> callable dispatch table once at service start.
        svc.uiProcSvc.Run(self, *args)
        self.uiCallbackDict = {None: self._NoneKeyIsInvalid_Callback,
         'OpenCharacterCustomization': self.__OpenCharacterCustomization_Callback,
         'CorpRecruitment': self._CorpRecruitment_Callback,
         'OpenCorporationPanel_Planets': self._OpenCorporationPanel_Planets_Callback,
         'OpenAuraInteraction': self.cmd.OpenAuraInteraction,
         'ExitStation': self.cmd.CmdExitStation,
         'OpenFitting': self.cmd.OpenFitting,
         'OpenShipHangar': self.cmd.OpenShipHangar,
         'OpenCargoBay': self.cmd.OpenCargoHoldOfActiveShip,
         'OpenDroneBay': self.cmd.OpenDroneBayOfActiveShip,
         'OpenMarket': self.cmd.OpenMarket,
         'OpenAgentFinder': self.cmd.OpenAgentFinder,
         'OpenStationDoor': self.__OpenStationDoor_Callback,
         'EnterHangar': self.cmd.CmdEnterHangar,
         'GiveNavigationFocus': self._GiveNavigationFocus_Callback}
        # Re-entrancy guard for the planets callback below.
        self.isOpeningPI = False

    def _PerformUICallback(self, callbackKey):
        # Dispatch asynchronously on a worker tasklet.
        # Returns True when the key is known, False (after logging) otherwise.
        callback = self.uiCallbackDict.get(callbackKey, None)
        if callback is not None:
            uthread.worker('_PerformUICallback_%s' % callbackKey, self._PerformUICallbackTasklet, callbackKey, callback)
            return True
        self.LogError('ActionObject.PerformUICallback: Unknown callbackKey', callbackKey)
        return False

    def _PerformUICallbackTasklet(self, callbackKey, callback):
        try:
            callback()
        except TypeError as e:
            # A non-callable mapping value surfaces here, not at table build time.
            self.LogError('ActionObject.PerformUICallback: callbackKey "%s" is associated with a non-callable object: %s' % (callbackKey, callback), e)

    def _NoneKeyIsInvalid_Callback(self):
        self.LogError('PerformUICallback called from ActionObject without the callbackKey property (it was None)!')

    def _CorpRecruitment_Callback(self):
        # NPC corp members get the recruitment pane; player corps the full panel.
        if util.IsNPC(session.corpid):
            self.cmd.OpenCorporationPanel_RecruitmentPane()
        else:
            self.cmd.OpenCorporationPanel()

    def _GiveNavigationFocus_Callback(self):
        sm.GetService('navigation').Focus()

    def _OpenCorporationPanel_Planets_Callback(self):
        # Skip if a previous activation is still in flight (guarded by isOpeningPI).
        if self.isOpeningPI:
            return
        self.isOpeningPI = True
        try:
            if sm.GetService('planetSvc').GetMyPlanets():
                self.cmd.OpenPlanets()
            else:
                # No colonies: open the planet view on a random planet of
                # the current solar system instead.
                systemData = sm.GetService('map').GetSolarsystemItems(session.solarsystemid2)
                systemPlanets = []
                for orbitalBody in systemData:
                    if orbitalBody.groupID == const.groupPlanet:
                        systemPlanets.append(orbitalBody)
                # NOTE(review): raises if the system has no planets -- presumably impossible.
                planetID = systemPlanets[random.randrange(0, len(systemPlanets))].itemID
                sm.GetService('viewState').ActivateView('planet', planetID=planetID)
                if not settings.user.suppress.Get('suppress.PI_Info', None):
                    uicore.Message('PlanetaryInteractionIntroText')
        finally:
            self.isOpeningPI = False

    def __OpenStationDoor_Callback(self):
        uicore.Message('CaptainsQuartersStationDoorClosed')

    def __OpenCharacterCustomization_Callback(self):
        # Ignore the request while the map service is busy transitioning.
        if getattr(sm.GetService('map'), 'busy', False):
            return
        if uicore.Message('EnterCharacterCustomizationCQ', {}, uiconst.YESNO, uiconst.ID_YES) == uiconst.ID_YES:
            self.cmd.OpenCharacterCustomization()
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
580505ac4ba1e1a284893894570d873fee8578a5
|
3bc7db0cc5f66aff517b18f0a1463fffd7b37a6f
|
/generate.py
|
5162c4a370a08417a9a630111ec0eec988adcd19
|
[
"MIT"
] |
permissive
|
patilvinay/docker-python-node
|
6643f96fd89214c7fe54c0010890052030e60016
|
fbab922c579ea0b6b12ce2183fe8d0e48cdd666a
|
refs/heads/master
| 2021-10-08T04:05:59.094149
| 2018-12-07T15:09:01
| 2018-12-07T15:09:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,020
|
py
|
#!/usr/bin/env python3
import itertools
import os
from copy import deepcopy
from glob import glob
from os.path import dirname
from os.path import join
from shutil import unpack_archive
from typing import List
from urllib.request import urlretrieve
import requests
import yaml
from dockerfile_compose import include_dockerfile
from packaging.version import Version
def get_repo_version(repo):
    """Return the SHA of the latest commit on *repo*'s master branch."""
    url = f'https://api.github.com/repos/{repo}/branches/master'
    headers = {'Accept': 'application/vnd.github.v3+json'}
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        raise RuntimeError(f"Can't get version for {repo}")
    return response.json()['commit']['sha']
# Pin each upstream repo to its current master commit SHA.
# NOTE(review): these HTTP calls run at import time -- network required.
repos = {
    'nodejs/docker-node': {
        'version': get_repo_version('nodejs/docker-node')
    },
    'docker-library/python': {
        'version': get_repo_version('docker-library/python')
    }
}
def fetch_all_repos():
    """Download a zip snapshot of each pinned repo and unpack it under repos/."""
    if not os.path.exists('repos'):
        os.makedirs('repos')
    for full_name, info in repos.items():
        sha = info['version']
        archive_url = f'https://github.com/{full_name}/archive/{sha}.zip'
        repo_dir = full_name.split('/')[1]
        archive_path = f'repos/{repo_dir}-{sha}.zip'
        urlretrieve(archive_url, archive_path)
        unpack_archive(archive_path, extract_dir='repos')
def get_dockerfiles(path):
    """Glob for ``*/stretch/Dockerfile`` one directory level below *path*."""
    pattern = join(path, r'*/stretch/Dockerfile')
    return glob(pattern)
def get_python_dockerfiles():
    """Stretch Dockerfiles from the pinned docker-library/python checkout."""
    version = repos['docker-library/python']['version']
    return get_dockerfiles(f'repos/python-{version}')
def get_node_dockerfiles():
    """Stretch Dockerfiles from the pinned nodejs/docker-node checkout."""
    version = repos['nodejs/docker-node']['version']
    return get_dockerfiles(f'repos/docker-node-{version}')
def update_travis_yaml():
    """Rewrite .travis.yml with one build stage per generated Dockerfile."""
    with open('.travis.yml', 'r') as travis_yaml:
        travis_dict = yaml.safe_load(travis_yaml)
    dockerfiles = glob('dockerfiles/*/Dockerfile')
    travis_dict = travis_yaml_add_stages(travis_dict, dockerfiles)
    with open('.travis.yml', 'w+') as travis_yaml:
        # Header marks the file as machine-generated.
        travis_yaml.write('# generated by generate.py\n')
        yaml.safe_dump(travis_dict, travis_yaml, default_flow_style=False)
def get_versions_from_dockerfile(dockerfile_path):
    """Extract PYTHON_VERSION / NODE_VERSION ENV values as Version objects.

    Missing entries stay ``None``.
    """
    found = {'node': None, 'python': None}
    with open(dockerfile_path, 'r') as handle:
        for raw_line in handle:
            if not raw_line.startswith('ENV'):
                continue
            # Expect exactly 'ENV <NAME> <VALUE>'.
            name, value = raw_line.split()[1:]
            if name == 'PYTHON_VERSION':
                found['python'] = Version(value)
            elif name == 'NODE_VERSION':
                found['node'] = Version(value)
    return found
def make_build_stage(dockerfile_path: str, tags: List[str]) -> dict:
    """Build one Travis 'Image Builds' job entry for a Dockerfile.

    The job builds the image, applies every tag in *tags*, and pushes
    the tags only on the master branch.
    """
    build_dir = dirname(dockerfile_path)
    script = [
        'set -e',
        'echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin',
        '# run tests',
        f'travis_retry docker build -t austinpray/python-node {build_dir}',
    ]
    script.extend(f'docker tag austinpray/python-node austinpray/python-node:{tag}' for tag in tags)
    script.extend(f'[ "$TRAVIS_BRANCH" = "master" ] && docker push austinpray/python-node:{tag}' for tag in tags)
    return {
        'stage': 'Image Builds',
        'name': ', '.join(tags),
        'if': 'type NOT IN (cron)',
        'script': script,
    }
def travis_yaml_add_stages(travis_dict: dict, dockerfile_paths: List[str]) -> dict:
    """Attach one Travis build stage per Dockerfile to *travis_dict*.

    Each Dockerfile receives every python-node tag alias (full, minor,
    and major truncations, e.g. 3.7.1-10.14.0 / 3.7-10 / 3-10) that
    resolves to it; truncated aliases go to the newest matching image.
    """
    dockerfiles = []
    for dockerfile_path in dockerfile_paths:
        versions = get_versions_from_dockerfile(dockerfile_path)
        dockerfiles.append({
            'dockerfile_path': dockerfile_path,
            'python_version': versions['python'],
            'node_version': versions['node']
        })
    # Newest python, then newest node, first.
    dockerfiles.sort(key=lambda x: (x['python_version'], x['node_version']))
    dockerfiles.reverse()

    def strip_version(version, n=0):
        # n == 0: full version string; n < 0: drop the last |n| components.
        if n == 0:
            return '.'.join(str(version).split('.'))
        return '.'.join(str(version).split('.')[:n])

    def group_by_version(py_offset=0, node_offset=0):
        # Map a truncated '<py>-<node>' tag to the newest Dockerfile that
        # matches it (first hit wins because dockerfiles is newest-first).
        group = {}
        for df in deepcopy(dockerfiles):
            key = ''.join([
                strip_version(df['python_version'],
                              py_offset),
                '-',
                strip_version(df['node_version'],
                              node_offset)
            ])
            if key not in group:
                group[key] = df['dockerfile_path']
        return group

    # Each combination of truncation depths contributes one alias set.
    options = [-2, -1, 0]
    dockerfile_tags = {}
    for t in itertools.product(options, options):
        for tag, dockerfile in group_by_version(t[0], t[1]).items():
            if dockerfile not in dockerfile_tags:
                dockerfile_tags[dockerfile] = [tag]
                continue
            dockerfile_tags[dockerfile].append(tag)
    travis_dict['jobs'] = {
        'include': [
            *[make_build_stage(dockerfile_path=df,
                               tags=tags) for df, tags in dockerfile_tags.items()]
        ]
    }
    return travis_dict
def generate_dockerfiles():
    """Write one combined Dockerfile per (python, node) upstream pair."""
    for dockerfileTuple in itertools.product(get_python_dockerfiles(), get_node_dockerfiles()):
        # Path component 2 is the version directory, e.g. repos/python-<sha>/3.7/...
        python_version = dockerfileTuple[0].split('/')[2]
        node_version = dockerfileTuple[1].split('/')[2]
        tag = f'{python_version}-{node_version}'
        print(tag)
        tag_dir = f'dockerfiles/{tag}'
        if not os.path.exists(tag_dir):
            os.makedirs(tag_dir)
        with open(join(tag_dir, 'Dockerfile'), 'w+') as template:
            template.write('''
# This is generated by generate.py, don't edit it directly
'''.strip())
            template.write('\n')
            template.write('FROM buildpack-deps:stretch\n')
            template.write('\n')
            # Inline both upstream Dockerfiles into the combined image.
            with open(dockerfileTuple[0], 'r') as df:
                include_dockerfile(df, template)
            with open(dockerfileTuple[1], 'r') as df:
                include_dockerfile(df, template)
            template.write('CMD ["python3"]\n')
def main():
    """Fetch the pinned repos, regenerate Dockerfiles, then update .travis.yml."""
    fetch_all_repos()
    generate_dockerfiles()
    update_travis_yaml()


if __name__ == '__main__':
    main()
|
[
"austin@austinpray.com"
] |
austin@austinpray.com
|
513e63af05b9489a3168b1f4f389088edf36f4a2
|
0cf316b6a125442294acdf78fe725de42a3ce6b4
|
/python/CosmiQNet.training.py
|
6d6e36cca5b642da0885b772be944269f78223c1
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
GPrathap/utilities
|
2a5f9ef2df9fdaa7a2ee9208aa8bbbca879be1f2
|
0624564e53a2860e66265654c23908688067798a
|
refs/heads/master
| 2021-01-19T17:59:00.588299
| 2017-08-26T14:08:38
| 2017-08-26T14:08:38
| 101,102,401
| 0
| 0
| null | 2017-08-22T20:01:22
| 2017-08-22T20:01:22
| null |
UTF-8
|
Python
| false
| false
| 4,008
|
py
|
# The NN
# Builds an iterative super-resolution graph: each "layer" is a conv stack
# whose output is convexly blended with its input, and each layer gets its
# own optimizer so layers can be trained one at a time.
# NOTE(review): alpha, beta, W, b, Wo, bo, conv, deconv, inlayer, outlayer,
# sr, sr_cost, MSE_sr, PSNR_sr, optimizer_layer, optimizer_all, FLAGS, gpu
# and numberOfBands are all defined earlier in the file (not shown here).
with tf.device(gpu):
    # Input has numberOfBands for the pre-processed image and numberOfBands for the original image
    xy = tf.placeholder(tf.float32, shape=[None, FLAGS.ws, FLAGS.ws, 2*numberOfBands])
    with tf.name_scope("split") as scope:
        x = tf.slice(xy, [0,0,0,0], [-1,-1,-1,numberOfBands]) # low res image
        y = tf.slice(xy, [0,0,0,numberOfBands], [-1,-1,-1,-1]) # high res image
    with tf.name_scope("initial_costs") as scope:
        # used as a measure of improvement, not for optimization
        cost_initial = tf.reduce_sum ( tf.pow( x-y,2))
        MSE_initial = cost_initial/(FLAGS.ws*FLAGS.ws*(1.0*numberOfBands)*FLAGS.batch_size)
        PSNR_initial = -10.0*tf.log(MSE_initial)/np.log(10.0)
    for i in range(FLAGS.total_layers):
        with tf.name_scope("layer"+str(i)) as scope:
            # alpha and beta are perturbation-layer bypass parameters that determine
            # a convex combination of an input layer and output layer.
            alpha[i] = tf.Variable(0.1, name='alpha_'+str(i))
            # beta clamps alpha into [FLAGS.min_alpha, 1.0]
            beta[i] = tf.maximum( FLAGS.min_alpha , tf.minimum ( 1.0 , alpha[i] ), name='beta_'+str(i))
            # First layer reads the low-res input; later layers chain.
            if (0 == i) :
                inlayer[i] = x
            else :
                inlayer[i] = outlayer[i-1]
            # we build a list of variables to optimize per layer
            vars_layer = [alpha[i]]
            # Convolutional layers
            W[i][0] = tf.Variable(tf.truncated_normal([FLAGS.filter_size,FLAGS.filter_size,numberOfBands,FLAGS.filters], stddev=0.1), name='W'+str(i)+'.'+str(0))
            b[i][0] = tf.Variable(tf.constant(0.0,shape=[FLAGS.filters]), name='b'+str(i)+'.'+str(0))
            conv[i][0] = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d( inlayer[i], W[i][0], strides=[1,1,1,1], padding='SAME'), b[i][0], name='conv'+str(i)+'.'+str(0)))
            # NOTE(review): W[i][0]/b[i][0] are not appended to vars_layer --
            # confirm whether the first conv is deliberately frozen per-layer.
            for j in range(1,FLAGS.convolutions_per_layer):
                W[i][j] = tf.Variable(tf.truncated_normal([FLAGS.filter_size,FLAGS.filter_size,FLAGS.filters,FLAGS.filters], stddev=0.1), name='W'+str(i)+'.'+str(j))
                b[i][j] = tf.Variable(tf.constant(0.0,shape=[FLAGS.filters]), name='b'+str(i)+'.'+str(j))
                vars_layer = vars_layer + [W[i][j],b[i][j]]
                conv[i][j] = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d( conv[i][j-1], W[i][j], strides=[1,1,1,1], padding='SAME'), b[i][j], name='conv'+str(i)+'.'+str(j)))
            # Deconvolutional layer maps the filter stack back to image bands.
            Wo[i] = tf.Variable(tf.truncated_normal([FLAGS.filter_size,FLAGS.filter_size,numberOfBands,FLAGS.filters], stddev=0.1), name='Wo'+str(i))
            bo[i] = tf.Variable(tf.constant(0.0,shape=[FLAGS.filters]), name='bo'+str(i))
            deconv[i] = tf.nn.relu(
                tf.nn.conv2d_transpose(
                    tf.nn.bias_add( conv[i][FLAGS.convolutions_per_layer-1], bo[i]), Wo[i], [FLAGS.batch_size,FLAGS.ws,FLAGS.ws,numberOfBands] ,strides=[1,1,1,1], padding='SAME'))
            vars_layer = vars_layer + [Wo[i],bo[i]]
            # Convex combination of input and output layer (residual-style bypass).
            outlayer[i] = tf.nn.relu( tf.add( tf.scalar_mul( beta[i] , deconv[i]), tf.scalar_mul(1.0-beta[i], inlayer[i])))
            # sr is the super-resolution output. It really only has enhancement
            # meaning during the current layer of training.
            sr[i] = tf.slice(outlayer[i],[0,0,0,0],[-1,-1,-1,numberOfBands])
            # The cost function to optimize. This is not PSNR but monotonically related.
            sr_cost[i] = tf.reduce_sum ( tf.pow( sr[i]-y,2))
            MSE_sr[i] = sr_cost[i]/(FLAGS.ws*FLAGS.ws*numberOfBands*1.0*FLAGS.batch_size)
            PSNR_sr[i] = -10.0*tf.log(MSE_sr[i])/np.log(10.0)
            # ADAM optimizers: one restricted to this layer's variables,
            # one over all trainable variables.
            optimizer_layer[i] = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate).minimize(sr_cost[i], var_list=vars_layer)
            optimizer_all[i] = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate).minimize(sr_cost[i])
|
[
"ggeesara@gmail.com"
] |
ggeesara@gmail.com
|
eab79d50f246b41e7ca2d6791bef6ec5ac89c03c
|
ea4e3ac0966fe7b69f42eaa5a32980caa2248957
|
/download/unzip/pyobjc/pyobjc-14/pyobjc/stable/PyOpenGL-2.0.2.01/OpenGL/Demo/NeHe/lesson3.py
|
499a6e4689f5adda4626afec603848f84836b3c1
|
[] |
no_license
|
hyl946/opensource_apple
|
36b49deda8b2f241437ed45113d624ad45aa6d5f
|
e0f41fa0d9d535d57bfe56a264b4b27b8f93d86a
|
refs/heads/master
| 2023-02-26T16:27:25.343636
| 2020-03-29T08:50:45
| 2020-03-29T08:50:45
| 249,169,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,888
|
py
|
#!
# This statement is required by the build system to query build info
if __name__ == '__build__':
    raise Exception

import string

# Parse the CVS keyword strings. str.split / str.join (string methods)
# replace the string.split / string.join module functions, which only
# exist in Python 2; behavior is identical on both interpreters.
__version__ = '$Revision: 1.8 $'.split()[1]
__date__ = ' '.join('$Date: 2002/12/31 04:13:55 $'.split()[1:3])
__author__ = 'Tarn Weisner Burton <twburton@users.sourceforge.net>'
#
# Ported to PyOpenGL 2.0 by Tarn Weisner Burton 10May2001
#
# This code was created by Richard Campbell '99 (ported to Python/PyOpenGL by John Ferguson 2000)
#
# The port was based on the PyOpenGL tutorial module: dots.py
#
# If you've found this code useful, please let me know (email John Ferguson at hakuin@voicenet.com).
#
# See original source and C based tutorial at http://nehe.gamedev.net
#
# Note:
# -----
# This code is not a good example of Python and using OO techniques. It is a simple and direct
# exposition of how to use the Open GL API in Python via the PyOpenGL package. It also uses GLUT,
# which in my opinion is a high quality library in that it makes my work simpler. Due to using
# these APIs, this code is more like a C program using function based programming (which Python
# is in fact based upon, note the use of closures and lambda) than a "good" OO program.
#
# To run this code get and install OpenGL, GLUT, PyOpenGL (see http://www.python.org), and PyNumeric.
# Installing PyNumeric means having a C compiler that is configured properly, or so I found. For
# Win32 this assumes VC++, I poked through the setup.py for Numeric, and chased through disutils code
# and noticed what seemed to be hard coded preferences for VC++ in the case of a Win32 OS. However,
# I am new to Python and know little about disutils, so I may just be not using it right.
#
# BTW, since this is Python make sure you use tabs or spaces to indent, I had numerous problems since I
# was using editors that were not sensitive to Python.
#
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import sys
# Some api in the chain is translating the keystrokes to this octal string
# so instead of saying: ESCAPE = 27, we use the following.
ESCAPE = '\033'
# Number of the glut window.
window = 0
# A general OpenGL initialization function. Sets all of the initial parameters.
def InitGL(Width, Height):              # We call this right after our OpenGL window is created.
    """One-time OpenGL state setup.

    Sets clear color/depth, enables depth testing, smooth shading, and
    installs a 45-degree perspective projection for the given window size.
    """
    glClearColor(0.0, 0.0, 0.0, 0.0)    # This Will Clear The Background Color To Black
    glClearDepth(1.0)                   # Enables Clearing Of The Depth Buffer
    glDepthFunc(GL_LESS)                # The Type Of Depth Test To Do
    glEnable(GL_DEPTH_TEST)             # Enables Depth Testing
    glShadeModel(GL_SMOOTH)             # Enables Smooth Color Shading
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()                    # Reset The Projection Matrix
    # Calculate The Aspect Ratio Of The Window
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glMatrixMode(GL_MODELVIEW)
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def ReSizeGLScene(Width, Height):
    """GLUT reshape callback: rebuild viewport and projection after a resize."""
    if Height == 0:                     # Prevent A Divide By Zero If The Window Is Too Small
        Height = 1
    glViewport(0, 0, Width, Height)     # Reset The Current Viewport And Perspective Transformation
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glMatrixMode(GL_MODELVIEW)
# The main drawing function.
def DrawGLScene():
    """GLUT display callback.

    Draws one smooth-shaded triangle (left) and one flat-colored quad
    (right), then swaps the double buffers.
    """
    # Clear The Screen And The Depth Buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()                    # Reset The View
    # Move Left 1.5 units and into the screen 6.0 units.
    glTranslatef(-1.5, 0.0, -6.0)
    # Since we have smooth color mode on, this will be great for the Phish Heads :-).
    # Draw a triangle
    glBegin(GL_POLYGON)                 # Start drawing a polygon
    glColor3f(1.0, 0.0, 0.0)            # Red
    glVertex3f(0.0, 1.0, 0.0)           # Top
    glColor3f(0.0, 1.0, 0.0)            # Green
    glVertex3f(1.0, -1.0, 0.0)          # Bottom Right
    glColor3f(0.0, 0.0, 1.0)            # Blue
    glVertex3f(-1.0, -1.0, 0.0)         # Bottom Left
    glEnd()                             # We are done with the polygon
    # Move Right 3.0 units.
    glTranslatef(3.0, 0.0, 0.0)
    # Draw a square (quadrilateral)
    glColor3f(0.3, 0.5, 1.0)            # Bluish shade
    glBegin(GL_QUADS)                   # Start drawing a 4 sided polygon
    glVertex3f(-1.0, 1.0, 0.0)          # Top Left
    glVertex3f(1.0, 1.0, 0.0)           # Top Right
    glVertex3f(1.0, -1.0, 0.0)          # Bottom Right
    glVertex3f(-1.0, -1.0, 0.0)         # Bottom Left
    glEnd()                             # We are done with the polygon
    # since this is double buffered, swap the buffers to display what just got drawn.
    glutSwapBuffers()
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
    """GLUT keyboard callback; args is (key, x, y). Exits the app on ESC."""
    # If escape is pressed, kill everything.
    if args[0] == ESCAPE:
        sys.exit()
def main():
    """Create the GLUT window, register all callbacks, and enter the main loop.

    glutMainLoop() never returns; the program exits via keyPressed/ESC.
    """
    global window
    # For now we just pass glutInit one empty argument. I wasn't sure what should or could be passed in (tuple, list, ...)
    # Once I find out the right stuff based on reading the PyOpenGL source, I'll address this.
    glutInit(sys.argv)

    # Select type of Display mode:
    #  Double buffer
    #  RGBA color
    #  Alpha components supported
    #  Depth buffer
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)

    # get a 640 x 480 window
    glutInitWindowSize(640, 480)

    # the window starts at the upper left corner of the screen
    glutInitWindowPosition(0, 0)

    # Okay, like the C version we retain the window id to use when closing, but for those of you new
    # to Python (like myself), remember this assignment would make the variable local and not global
    # if it weren't for the global declaration at the start of main.
    window = glutCreateWindow("Jeff Molofee's GL Code Tutorial ... NeHe '99")

    # Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
    # set the function pointer and invoke a function to actually register the callback, otherwise it
    # would be very much like the C version of the code.
    glutDisplayFunc(DrawGLScene)

    # Uncomment this line to get full screen.
    #glutFullScreen()

    # When we are doing nothing, redraw the scene.
    glutIdleFunc(DrawGLScene)

    # Register the function called when our window is resized.
    glutReshapeFunc(ReSizeGLScene)

    # Register the function called when the keyboard is pressed.
    glutKeyboardFunc(keyPressed)

    # Initialize our window.
    InitGL(640, 480)

    # Start Event Processing Engine
    glutMainLoop()
# Print message to console, and kick off the main to get it rolling.
# NOTE: Python 2 print statement -- this file predates Python 3 and must be
# run with a Python 2 interpreter.
print "Hit ESC key to quit."
main()
|
[
"hyl946@163.com"
] |
hyl946@163.com
|
3257118e28b9313b80431811480ac0d8a136bdf6
|
dd6c23aa9e514b77c3902075ea54e8b754fd3bce
|
/docs/source/conf.py
|
e32250b11378e8936ab862fdc86707876239259d
|
[
"MIT"
] |
permissive
|
gvx/wurm
|
78b71880ff9acbd503281fbe61d77063bac59643
|
c6702aee03785713035ed75632b3898f4fee1664
|
refs/heads/master
| 2023-05-02T06:14:37.251061
| 2021-05-26T15:34:09
| 2021-05-26T15:34:09
| 328,152,422
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import pathlib
import sys
# Make the repository root importable so autodoc can find the wurm package
# (this file lives at docs/source/, so parent.parent.parent is the repo root).
sys.path.insert(0, str(pathlib.Path(__file__).parent.parent.parent))

# -- Project information -----------------------------------------------------

project = 'wurm'
copyright = '2021, Jasmijn Wellner'
author = 'Jasmijn Wellner'

# The full version, including alpha/beta/rc tags
from wurm import __version__
release = __version__

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
[
"gyvox.public@gmail.com"
] |
gyvox.public@gmail.com
|
dc2c585ae7d7fca0beee6bf3a1ad69b954519988
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/pyobjc/pyobjc/pyobjc-framework-Quartz-2.5.1/Examples/TLayer/TLayerDemo.py
|
d71e50b3335c923a766abb8f7e771799cc0a1a04
|
[
"MIT"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 1,877
|
py
|
from Cocoa import *
from PyObjCTools import NibClassBuilder
from Quartz import *
import objc
import ShadowOffsetView
class TLayerDemo (NSObject):
    """Controller for the TLayer demo window.

    Wires the nib outlets (shadow-offset view, radius slider,
    transparency-layer checkbox) to the tlayer view and relays UI
    changes to it.
    """

    colorWell = objc.IBOutlet()
    shadowOffsetView = objc.IBOutlet()
    shadowRadiusSlider = objc.IBOutlet()
    tlayerView = objc.IBOutlet()
    transparencyLayerButton = objc.IBOutlet()

    @classmethod
    def initialize(self):
        # Objective-C +initialize: runs once before any instance exists.
        # Let the shared color panel edit alpha, since shadows use it.
        NSColorPanel.sharedColorPanel().setShowsAlpha_(True)

    def init(self):
        """Load TLayerDemo.nib and seed the initial shadow offset/radius.

        Returns self on success, None when super-init or nib loading fails.
        """
        self = super(TLayerDemo, self).init()
        if self is None:
            return None
        if not NSBundle.loadNibNamed_owner_("TLayerDemo", self):
            NSLog("Failed to load TLayerDemo.nib")
            # Bug fix: was `return nil`, which raises NameError in Python --
            # the Objective-C nil is spelled None here.
            return None
        self.shadowOffsetView.setScale_(40)
        self.shadowOffsetView.setOffset_(CGSizeMake(-30, -30))
        self.tlayerView.setShadowOffset_(CGSizeMake(-30, -30))
        self.shadowRadiusChanged_(self.shadowRadiusSlider)
        # Better to do this as a subclass of NSControl....
        NSNotificationCenter.defaultCenter(
            ).addObserver_selector_name_object_(
                self, 'shadowOffsetChanged:',
                ShadowOffsetView.ShadowOffsetChanged, None)
        return self

    def dealloc(self):
        # Unsubscribe before teardown to avoid dangling observer callbacks.
        NSNotificationCenter.defaultCenter().removeObserver_(self)
        super(TLayerDemo, self).dealloc()

    def window(self):
        """Return the window hosting the tlayer view."""
        return self.tlayerView.window()

    @objc.IBAction
    def shadowRadiusChanged_(self, sender):
        # Slider action: push the new blur radius to the view.
        self.tlayerView.setShadowRadius_(self.shadowRadiusSlider.floatValue())

    @objc.IBAction
    def toggleTransparencyLayers_(self, sender):
        # Checkbox action: toggle drawing through a transparency layer.
        self.tlayerView.setUsesTransparencyLayers_(self.transparencyLayerButton.state())

    def shadowOffsetChanged_(self, notification):
        # Posted by ShadowOffsetView; mirror the new offset on the view.
        offset = notification.object().offset()
        self.tlayerView.setShadowOffset_(offset)
|
[
"opensource@apple.com"
] |
opensource@apple.com
|
b1ce9c9f3c6a4da4e41e158cd3872a64af2f9ff2
|
6671be3a542925342379d5f6fc691acfebbe281f
|
/discounts/src/app.py
|
496dec244427273c6b9407c558f1a2a838d82d7d
|
[
"Apache-2.0"
] |
permissive
|
dalmarcogd/mobstore
|
e79b479b39474873043345b70f7e972f304c1586
|
0b542b9267771a1f4522990d592028dc30ee246f
|
refs/heads/main
| 2023-04-29T22:27:20.344929
| 2021-05-18T12:00:00
| 2021-05-18T12:00:00
| 365,539,054
| 0
| 0
|
Apache-2.0
| 2021-05-17T23:22:58
| 2021-05-08T14:46:34
|
Go
|
UTF-8
|
Python
| false
| false
| 880
|
py
|
from concurrent import futures
import grpc
from src import settings
from src.consumer import sqs
from src.discountsgrpc import discounts_pb2_grpc
from src.handlers.disounts import Discounts
from src.handlers.products import handle_products_events
from src.handlers.users import handle_users_events
class Server:
    """Blocking gRPC server exposing the Discounts service on port 50051."""

    @staticmethod
    def run():
        # Thread pool bounds the number of concurrently handled RPCs.
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        discounts_pb2_grpc.add_DiscountsServicer_to_server(Discounts(), server)
        # NOTE(review): plaintext port -- assumes TLS is terminated upstream;
        # confirm before exposing publicly.
        server.add_insecure_port('[::]:50051')
        server.start()
        server.wait_for_termination()  # blocks forever
class Consumer:
    """Launches SQS polling loops for product and user events on worker threads."""

    @staticmethod
    def run():
        # One worker per queue; submit() returns immediately (fire-and-forget),
        # so this method does not block on the polling loops.
        ex = futures.ThreadPoolExecutor(max_workers=2)
        ex.submit(sqs.start_pool, settings.PRODUCTS_EVENTS, handle_products_events)
        ex.submit(sqs.start_pool, settings.USERS_EVENTS, handle_users_events)
|
[
"dalmarco.gd@gmail.com"
] |
dalmarco.gd@gmail.com
|
61b8e12ef142755e0f21788aadb9c6115e531a51
|
9abc2f4fbf1b31b5a56507437b4a8d9c3f3db7e6
|
/newsletter/migrations/0001_initial.py
|
7ec8cdad3f338cedbfa3b2dd1bbe2848327e86e9
|
[] |
no_license
|
odbalogun/ticketr
|
e9fe8461d66dabe395f0e1af8fbecc67dbb16e97
|
94f24c82f407f861f1614a151feb3fdd62b283e5
|
refs/heads/master
| 2022-11-30T22:40:30.931160
| 2019-08-09T14:34:38
| 2019-08-09T14:34:38
| 188,833,600
| 0
| 0
| null | 2022-11-22T03:50:30
| 2019-05-27T11:50:07
|
Python
|
UTF-8
|
Python
| false
| false
| 742
|
py
|
# Generated by Django 2.2.1 on 2019-06-09 23:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the newsletter Subscribers table.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Subscribers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('first_name', models.CharField(max_length=100, null=True, verbose_name='first name')),
                ('last_name', models.CharField(max_length=100, null=True, verbose_name='last name')),
            ],
        ),
    ]
|
[
"oduntan@live.com"
] |
oduntan@live.com
|
c48d1ed17bcbb58954275bb553132df81fc90245
|
6b6e20004b46165595f35b5789e7426d5289ea48
|
/endpoints/csrf.py
|
11c225924f6a0baa17a9604c9e0d567a54eb5a0a
|
[
"Apache-2.0"
] |
permissive
|
anwarchk/quay
|
2a83d0ab65aff6a1120fbf3a45dd72f42211633b
|
23c5120790c619174e7d36784ca5aab7f4eece5c
|
refs/heads/master
| 2020-09-12T18:53:21.093606
| 2019-11-15T19:29:02
| 2019-11-15T19:29:02
| 222,517,145
| 0
| 0
|
Apache-2.0
| 2019-11-18T18:32:35
| 2019-11-18T18:32:35
| null |
UTF-8
|
Python
| false
| false
| 2,375
|
py
|
import logging
import os
import base64
import hmac
from functools import wraps
from flask import session, request, Response
import features
from app import app
from auth.auth_context import get_validated_oauth_token
from util.http import abort
logger = logging.getLogger(__name__)
OAUTH_CSRF_TOKEN_NAME = '_oauth_csrf_token'
_QUAY_CSRF_TOKEN_NAME = '_csrf_token'
_QUAY_CSRF_HEADER_NAME = 'X-CSRF-Token'
QUAY_CSRF_UPDATED_HEADER_NAME = 'X-Next-CSRF-Token'
def generate_csrf_token(session_token_name=_QUAY_CSRF_TOKEN_NAME, force=False):
  """ If not present in the session, generates a new CSRF token with the given name
      and places it into the session. Returns the generated token.
  """
  if session_token_name not in session or force:
    # 48 random bytes -> base64 token.
    # NOTE(review): b64encode returns bytes on Python 3; this codebase appears
    # to target Python 2 str semantics -- confirm before porting.
    session[session_token_name] = base64.b64encode(os.urandom(48))

  return session[session_token_name]
def verify_csrf(session_token_name=_QUAY_CSRF_TOKEN_NAME,
                request_token_name=_QUAY_CSRF_TOKEN_NAME,
                check_header=True):
  """ Verifies that the CSRF token with the given name is found in the session and
      that the matching token is found in the request args or values.

      Aborts the request with 403 when either token is missing or they differ.
  """
  token = str(session.get(session_token_name, ''))
  found_token = str(request.values.get(request_token_name, ''))

  # Fall back to the X-CSRF-Token header when no form/query token was sent.
  if check_header and not found_token:
    found_token = str(request.headers.get(_QUAY_CSRF_HEADER_NAME, ''))

  # compare_digest gives a constant-time comparison (timing-attack safe).
  if not token or not found_token or not hmac.compare_digest(token, found_token):
    msg = 'CSRF Failure. Session token (%s) was %s and request token (%s) was %s'
    logger.error(msg, session_token_name, token, request_token_name, found_token)
    abort(403, message='CSRF token was invalid or missing.')
def csrf_protect(session_token_name=_QUAY_CSRF_TOKEN_NAME,
                 request_token_name=_QUAY_CSRF_TOKEN_NAME,
                 all_methods=False,
                 check_header=True):
  """ Decorator factory: enforce CSRF verification on the wrapped view.

      Verification is skipped for OAuth-authenticated requests and, unless
      all_methods is set, for GET/HEAD requests.
  """
  def inner(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
      # Verify the CSRF token.
      if get_validated_oauth_token() is None:
        if all_methods or (request.method != "GET" and request.method != "HEAD"):
          verify_csrf(session_token_name, request_token_name, check_header)

      # Invoke the handler.
      resp = func(*args, **kwargs)
      return resp
    return wrapper
  return inner
# Expose token generation to all templates as {{ csrf_token() }}.
app.jinja_env.globals['csrf_token'] = generate_csrf_token
|
[
"jimmy.zelinskie+git@gmail.com"
] |
jimmy.zelinskie+git@gmail.com
|
7c82324df8e0c124b32fe046b39e3485192ab117
|
afcb260d6f0c1d88232d2e300d26d8fb71b5ef43
|
/django-app/config/urls.py
|
34c68213f81c1a11280acec317c46cb45ec32129
|
[] |
no_license
|
JeongEuiJin/deploy-eb-docker
|
e5d10f65166ca8a1a4a5fdd32c9647c0d8f5feed
|
1f5b57aa5e119f68c169f059e9bf88d5fbf76850
|
refs/heads/master
| 2020-12-02T17:46:19.905183
| 2017-07-13T07:32:36
| 2017-07-13T07:32:36
| 96,424,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^post/',include('post.urls')),
    url(r'^member/',include('member.urls')),
]
# Serve files under STATIC_ROOT at STATIC_URL (development use).
urlpatterns+=static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Serve uploaded files under MEDIA_ROOT at MEDIA_URL (development use).
urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"hehar1020@gmail.com"
] |
hehar1020@gmail.com
|
82d795efd4da1007bea5644cb68b779be1ba7674
|
865bd0c84d06b53a39943dd6d71857e9cfc6d385
|
/126-word-ladder-ii/word-ladder-ii.py
|
3d138f153124ee6bf15e58335c36caca5c1977cc
|
[] |
no_license
|
ANDYsGUITAR/leetcode
|
1fd107946f4df50cadb9bd7189b9f7b7128dc9f1
|
cbca35396738f1fb750f58424b00b9f10232e574
|
refs/heads/master
| 2020-04-01T18:24:01.072127
| 2019-04-04T08:38:44
| 2019-04-04T08:38:44
| 153,473,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,174
|
py
|
# Given two words (beginWord and endWord), and a dictionary's word list, find all shortest transformation sequence(s) from beginWord to endWord, such that:
#
#
# Only one letter can be changed at a time
# Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
#
#
# Note:
#
#
# Return an empty list if there is no such transformation sequence.
# All words have the same length.
# All words contain only lowercase alphabetic characters.
# You may assume no duplicates in the word list.
# You may assume beginWord and endWord are non-empty and are not the same.
#
#
# Example 1:
#
#
# Input:
# beginWord = "hit",
# endWord = "cog",
# wordList = ["hot","dot","dog","lot","log","cog"]
#
# Output:
# [
# ["hit","hot","dot","dog","cog"],
# ["hit","hot","lot","log","cog"]
# ]
#
#
# Example 2:
#
#
# Input:
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log"]
#
# Output: []
#
# Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
#
#
#
#
#
class Solution:
    """Word Ladder II: find all shortest beginWord -> endWord transformation
    sequences, where each step changes one letter and must be in wordList.

    Uses layer-by-layer BFS: each layer maps a word to every shortest path
    that reaches it, and visited words are removed from the pool after the
    layer completes so longer paths can never revisit them.
    (Dead commented-out DFS prototype removed.)
    """

    def __init__(self):
        # Kept for backward compatibility with the earlier DFS pruning logic.
        self.l = float('inf')

    def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        """Return every shortest ladder as a list of word lists, [] if none.

        :param beginWord: starting word (not required to be in wordList)
        :param endWord: target word; must be in wordList for a solution
        :param wordList: allowed intermediate (and final) words
        """
        from collections import defaultdict

        if not beginWord or not endWord or not wordList or endWord not in wordList:
            return []

        unvisited = set(wordList)
        results = []
        # layer maps each frontier word to all shortest paths reaching it.
        layer = {beginWord: [[beginWord]]}
        while layer:
            next_layer = defaultdict(list)
            for word, paths in layer.items():
                if word == endWord:
                    # All paths reaching endWord in this layer are shortest.
                    results.extend(paths)
                else:
                    # Expand by every single-letter substitution.
                    for i in range(len(word)):
                        for c in 'abcdefghijklmnopqrstuvwxyz':
                            candidate = word[:i] + c + word[i + 1:]
                            if candidate in unvisited:
                                next_layer[candidate] += [p + [candidate] for p in paths]
            # Remove this layer's words so later layers never revisit them.
            unvisited -= set(next_layer.keys())
            layer = next_layer
        return results
|
[
"andyandwei@163.com"
] |
andyandwei@163.com
|
8b29bf46fef31ffb57cdaf9a8c463b8d3377add4
|
ab9de9d522d9f50a29fd5b7a59bced5add5c588b
|
/zoom_api/migrations/versions/c358b3b57073_added_required_tables.py
|
2ef4ddfa4eb8d57d410605b440c7c06a905bab61
|
[] |
no_license
|
DmytroKaminskiy/booksharing
|
c97d473547109af16b58d25d6a2183493a8f17ae
|
26c89a0954d07c1c9d128d05538eff879a061d2f
|
refs/heads/main
| 2023-04-08T13:55:26.430532
| 2021-04-22T18:34:39
| 2021-04-22T18:34:39
| 330,433,074
| 0
| 0
| null | 2021-01-24T15:17:54
| 2021-01-17T16:19:35
|
Python
|
UTF-8
|
Python
| false
| false
| 561
|
py
|
"""Added required tables
Revision ID: c358b3b57073
Revises: ddbbb5334900
Create Date: 2021-04-15 18:31:39.907841
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c358b3b57073'
down_revision = 'ddbbb5334900'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision (Alembic autogenerated no schema changes)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision (Alembic autogenerated no schema changes)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
|
[
"dmytro.kaminskyi92@gmail.com"
] |
dmytro.kaminskyi92@gmail.com
|
c27b701be44617207b94395a37a36f5e6ab2037f
|
484a348682d9fa515666b94a5cd3a13b1b725a9e
|
/Leetcode/最近最少使用-缓存机制.py
|
995ecc50910ddde2ceeae5df99c69464c1689d74
|
[] |
no_license
|
joseph-mutu/Codes-of-Algorithms-and-Data-Structure
|
1a73772825c3895419d86d6f1f506d58617f3ff0
|
d62591683d0e2a14c72cdc64ae1a36532c3b33db
|
refs/heads/master
| 2020-12-29T17:01:55.097518
| 2020-04-15T19:25:43
| 2020-04-15T19:25:43
| 238,677,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,999
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-02-04 11:32:08
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
import os
'''
1. 当 put 一个键值对的时候,如果已经存在相应的键,则重写该值
2. 当 get 一个键时,将相应的节点提取到 head 之后
3. 一个 Hash 表中键为 key (一个值),其存储的即为双向链表中的节点地址
'''
class ListNode(object):
    """Doubly-linked-list node carrying one cache key/value pair."""

    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value
        # Neighbor links; unset until the node is spliced into a list.
        self.prev = None
        self.next = None
class LRUCache(object):
    """Least-recently-used cache with O(1) get and put.

    Rewritten on top of collections.OrderedDict: insertion order doubles as
    the recency order (most recently used at the right end), replacing the
    hand-rolled doubly-linked list. Fixes the fragile truthiness membership
    tests (`hashmap.get(key, 0)`) and the crash on capacity <= 0.
    """

    def __init__(self, capacity):
        """
        :type capacity: int -- maximum number of entries held
        """
        from collections import OrderedDict  # local import keeps the class self-contained
        self.capacity = capacity
        self._entries = OrderedDict()  # key -> value, ordered oldest-first

    def get(self, key):
        """
        :type key: int
        :rtype: int -- stored value, or -1 when the key is absent
        """
        if key not in self._entries:
            return -1
        # Accessing a key makes it the most recently used.
        self._entries.move_to_end(key)
        return self._entries[key]

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: None
        """
        if key in self._entries:
            # Overwrite in place and refresh recency.
            self._entries.move_to_end(key)
            self._entries[key] = value
            return
        if self.capacity <= 0:
            return  # robustness: original crashed (AttributeError) here
        if len(self._entries) >= self.capacity:
            # Evict the least recently used entry (left end).
            self._entries.popitem(last=False)
        self._entries[key] = value
# Demo scenario from the LeetCode problem statement.
cache = LRUCache(2)
cache.put(1, 1);
cache.put(2, 2);
cache.get(1);       # returns 1
cache.put(3, 3);    # this operation evicts key 2
cache.get(2);       # returns -1 (not found)
cache.put(4, 4);    # this operation evicts key 1
cache.get(1);       # returns -1 (not found)
cache.get(3);       # returns 3
cache.get(4);       # returns 4
|
[
"josephmathone@gmail.com"
] |
josephmathone@gmail.com
|
a30ff5b0bb92c54ed0b0a2e6332f0b6d13fcba74
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startCirq1553.py
|
7ea844d2f64eef952d9421759e00decb9d0d2c5e
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,374
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=64
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the auto-generated 5-qubit benchmark circuit.

    :param n: qubit count (documentation only; the gate list below is fixed)
    :param input_qubit: sequence of cirq qubits the gates act on
    :returns: cirq.Circuit ending in a full measurement keyed 'result'

    The `# number=k` comments are generator bookkeeping ids -- do not reorder
    gates, the sequence is the benchmark itself.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0])) # number=3
    c.append(cirq.H.on(input_qubit[1])) # number=4
    c.append(cirq.H.on(input_qubit[2])) # number=5
    c.append(cirq.H.on(input_qubit[1])) # number=29
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[1])) # number=30
    c.append(cirq.H.on(input_qubit[1])) # number=31
    c.append(cirq.H.on(input_qubit[3]))  # number=6
    c.append(cirq.H.on(input_qubit[4]))  # number=21

    for i in range(2):
        c.append(cirq.H.on(input_qubit[0])) # number=1
        c.append(cirq.H.on(input_qubit[1])) # number=2
        c.append(cirq.H.on(input_qubit[2])) # number=7
        c.append(cirq.H.on(input_qubit[3])) # number=8
        c.append(cirq.H.on(input_qubit[0])) # number=17
        c.append(cirq.H.on(input_qubit[1])) # number=18
        c.append(cirq.H.on(input_qubit[2])) # number=19
        c.append(cirq.H.on(input_qubit[3])) # number=20
        c.append(cirq.H.on(input_qubit[0])) # number=38
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=39
        c.append(cirq.H.on(input_qubit[0])) # number=40
        c.append(cirq.H.on(input_qubit[0])) # number=51
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=52
        c.append(cirq.H.on(input_qubit[0])) # number=53
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=48
        c.append(cirq.X.on(input_qubit[0])) # number=49
        c.append(cirq.H.on(input_qubit[0])) # number=57
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=58
        c.append(cirq.H.on(input_qubit[0])) # number=59
        c.append(cirq.H.on(input_qubit[0])) # number=54
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=55
        c.append(cirq.H.on(input_qubit[0])) # number=56
        c.append(cirq.H.on(input_qubit[4])) # number=41
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=37
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=61
        c.append(cirq.X.on(input_qubit[1])) # number=62
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=63
        c.append(cirq.H.on(input_qubit[2])) # number=25
        c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=26
        c.append(cirq.H.on(input_qubit[2])) # number=27
        c.append(cirq.X.on(input_qubit[2])) # number=23
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=24
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=32
        c.append(cirq.X.on(input_qubit[3])) # number=33
        c.append(cirq.H.on(input_qubit[3])) # number=42
        c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=43
        c.append(cirq.H.on(input_qubit[3])) # number=44
        c.append(cirq.X.on(input_qubit[0])) # number=13
        c.append(cirq.rx(0.6157521601035993).on(input_qubit[1])) # number=60
        c.append(cirq.X.on(input_qubit[1])) # number=14
        c.append(cirq.X.on(input_qubit[2])) # number=15
        c.append(cirq.X.on(input_qubit[3])) # number=16

    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Concatenate measurement bits into a string such as '01011'."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    # Build the benchmark circuit on a 5-qubit column of grid qubits.
    qubit_count = 5
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore gate set before simulating.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    # Histogram of measured bitstrings, folded through bitstring().
    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq1553.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
6f609631be0bfde1bb461c37c628c17074c4b46e
|
b45d66c2c009d74b4925f07d0d9e779c99ffbf28
|
/tests/unit_tests/economics_tests/test_helper_latest_econ.py
|
49ac894caf61856731d392068233abe9b6b76693
|
[] |
no_license
|
erezrubinstein/aa
|
d96c0e39762fe7aaeeadebbd51c80b5e58576565
|
a3f59ba59519183257ed9a731e8a1516a4c54b48
|
refs/heads/master
| 2021-03-12T23:44:56.319721
| 2016-09-18T23:01:17
| 2016-09-18T23:01:17
| 22,665,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,506
|
py
|
from common.helpers.common_dependency_helper import register_common_mox_dependencies
from common.utilities.inversion_of_control import dependencies, Dependency
from economics.helpers.helpers import get_latest_econ_month
import datetime
import unittest
import mox
__author__ = 'jsternberg'
class EconomicsHelperLatestEconTests(mox.MoxTestBase):
def setUp(self):
super(EconomicsHelperLatestEconTests, self).setUp()
# set up mocks
register_common_mox_dependencies(self.mox)
self.mock_main_access = Dependency("CoreAPIProvider").value
self.main_param = Dependency("CoreAPIParamsBuilder").value
self.context = {
"user": "Alfred E. Neuman",
"source": "What? Me worry?"
}
def tearDown(self):
# remove dependencies for next set of tests
dependencies.clear()
def test_get_latest_econ_month__basic(self):
self.mox.StubOutWithMock(self.mock_main_access.mds, "call_find_entities_raw")
query = {}
fields = ["data.econ_count_by_date"]
sort = [["data.rds_file_id", -1]]
params = self.main_param.mds.create_params(resource="find_entities_raw", query=query, entity_fields=fields,
sort=sort, limit=1)["params"]
mock_stats = [
{
"data": {
"econ_count_by_date": [
{
"count": 198484,
"date": "2014-01-01T00:00:00"
},
{
"count": 4860,
"date": 2013
},
{
"count": 198448,
"date": "2013-12-01T00:00:00"
},
{
"count": 198448,
"date": "2013-11-01T00:00:00"
},
{
"count": 198448,
"date": "2013-10-01T00:00:00"
}
]
}
}
]
self.mock_main_access.mds.call_find_entities_raw("econ_stats", params, context=self.context,
encode_and_decode_results=False).AndReturn(mock_stats)
# replay mode
self.mox.ReplayAll()
expected = datetime.datetime(2014, 1, 1)
latest = get_latest_econ_month(self.main_param, self.mock_main_access, context=self.context)
self.assertEqual(latest, expected)
def test_get_latest_econ_month__real_dates(self):
self.mox.StubOutWithMock(self.mock_main_access.mds, "call_find_entities_raw")
query = {}
fields = ["data.econ_count_by_date"]
sort = [["data.rds_file_id", -1]]
params = self.main_param.mds.create_params(resource="find_entities_raw", query=query, entity_fields=fields,
sort=sort, limit=1)["params"]
mock_stats = [
{
"data": {
"econ_count_by_date": [
{
"count": 198484,
"date": datetime.datetime(2014, 1, 1)
},
{
"count": 4860,
"date": 2013
},
{
"count": 198448,
"date": datetime.datetime(2013, 12, 1)
},
{
"count": 198448,
"date": datetime.datetime(2013, 11, 1)
},
{
"count": 198448,
"date": datetime.datetime(2013, 10, 1)
}
]
}
}
]
self.mock_main_access.mds.call_find_entities_raw("econ_stats", params, context=self.context,
encode_and_decode_results=False).AndReturn(mock_stats)
# replay mode
self.mox.ReplayAll()
expected = datetime.datetime(2014, 1, 1)
latest = get_latest_econ_month(self.main_param, self.mock_main_access, context=self.context)
self.assertEqual(latest, expected)
def test_get_latest_econ_month__latest_month_incomplete(self):
    """A newest month with a lower count is treated as incomplete; the prior month wins."""
    self.mox.StubOutWithMock(self.mock_main_access.mds, "call_find_entities_raw")
    params = self.main_param.mds.create_params(
        resource="find_entities_raw",
        query={},
        entity_fields=["data.econ_count_by_date"],
        sort=[["data.rds_file_id", -1]],
        limit=1)["params"]
    # 2014-01 has only 180000 records vs 198448 for earlier months,
    # so it should be skipped in favour of 2013-12.
    counts_by_date = [
        (180000, datetime.datetime(2014, 1, 1)),
        (4860, 2013),
        (198448, datetime.datetime(2013, 12, 1)),
        (198448, datetime.datetime(2013, 11, 1)),
        (198448, datetime.datetime(2013, 10, 1)),
    ]
    stats_fixture = [{
        "data": {
            "econ_count_by_date": [
                {"count": count, "date": date}
                for count, date in counts_by_date
            ]
        }
    }]
    self.mock_main_access.mds.call_find_entities_raw(
        "econ_stats", params, context=self.context,
        encode_and_decode_results=False).AndReturn(stats_fixture)
    # replay mode
    self.mox.ReplayAll()
    latest = get_latest_econ_month(self.main_param, self.mock_main_access, context=self.context)
    self.assertEqual(latest, datetime.datetime(2013, 12, 1))
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
|
[
"erezrubinstein@hotmail.com"
] |
erezrubinstein@hotmail.com
|
491e2f2be0b5d03dad974f7cf3db6d9cc05b6006
|
3a788125cd884688b0be8beb1cf47a4a0b6bbdeb
|
/bin/util/pcurl.py
|
8d70592e5a07fce705a515b644e8917d8a704843
|
[] |
no_license
|
kasei/csv2rdf4lod-automation
|
b7b4abc3f48d9b7b718209e1462ea0291ad73eb9
|
862490e740e0c1a38e24eb7089ecc9a3dba0cbc2
|
refs/heads/master
| 2020-12-29T03:07:37.685161
| 2011-09-19T18:42:10
| 2011-09-19T18:42:10
| 2,156,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,319
|
py
|
#!/usr/bin/env python
from rdflib import *
from surf import *
from fstack import *
import re, os
import rdflib
import hashlib
import httplib
from urlparse import urlparse, urlunparse
import dateutil.parser
import subprocess
import platform
from serializer import *
from StringIO import StringIO
# These are the namespaces we are using. They need to be added in
# order for the Object RDF Mapping tool (surf) to resolve predicate
# attribute names like `frbr_exemplarOf` below.
ns.register(frbr="http://purl.org/vocab/frbr/core#")
ns.register(frir="http://purl.org/twc/ontology/frir.owl#")
# NOTE(review): the "hash:" URIs are a non-standard scheme used as a
# prefix for Expression/Manifestation/Item identifiers — confirm intent.
ns.register(pexp="hash:Expression/")
ns.register(pmanif="hash:Manifestation/")
ns.register(pitem="hash:Item/")
ns.register(nfo="http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#")
ns.register(irw='http://www.ontologydesignpatterns.org/ont/web/irw.owl#')
ns.register(hash="hash:")
ns.register(prov="http://w3.org/ProvenanceOntology.owl#")
def call(command):
    """Run *command* through the shell, returning the (stdout, stderr) pair."""
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return proc.communicate()
def getController(Agent):
    """Build a provenance Agent for the current user.

    Shells out to the csv2rdf4lod user-account script and strips the first
    and last two characters of its stdout (presumably surrounding quotes —
    TODO confirm against the script's output format).
    """
    account = call('$CSV2RDF4LOD_HOME/bin/util/user-account.sh --cite')[0]
    return Agent(account[1:-2])
# Maps a URL scheme to the httplib connection class used by getResponse().
connections = {'http':httplib.HTTPConnection,
               'https':httplib.HTTPSConnection}
def getResponse(url):
    """Issue a plain HTTP/HTTPS GET for *url* and return the httplib response.

    Redirects are NOT followed here; the caller inspects the status code.
    """
    parsed = urlparse(str(url))
    conn = connections[parsed.scheme](parsed.netloc)
    # Rebuild the request path without scheme/netloc for the request line.
    path = urlunparse([None, None, parsed.path, parsed.params, parsed.query, parsed.fragment])
    conn.request('GET', path)
    return conn.getresponse()
def pcurl(url):
    """Fetch *url*, save the payload to a local file, and record FRBR/FRIR
    provenance (work → expression/item, plus the HTTP GET transaction) as a
    Turtle file next to the download.

    Raises Exception on a redirect loop or any non-200 final status.
    """
    ns.register(workurl=url+'#')
    # In-memory RDF store/session for all provenance objects created below.
    pStore = Store(reader="rdflib", writer="rdflib",
                   rdflib_store='IOMemory')
    pSession = Session(pStore)
    Work = pSession.get_class(ns.FRBR['Work'])
    Agent = pSession.get_class(ns.PROV['Agent'])
    Entity = pSession.get_class(ns.PROV['Entity'])
    controller = getController(Agent)
    work = Work(url)
    works = set([url])  # URLs seen so far, used to detect redirect loops
    response = getResponse(url)
    content = response.read()
    originalWork = work
    # Follow 3xx redirects manually, chaining each hop with irw:redirectsTo.
    while response.status >= 300 and response.status < 400:
        newURL = response.msg.dict['location']
        if newURL in works:
            raise Exception("Redirect loop")
        works.add(newURL)
        newWork = Work(newURL)
        newWork.save()
        work.irw_redirectsTo.append(newWork)
        work.save()
        work = newWork
        response = getResponse(work.subject)
        content = response.read()
    if response.status != 200:
        raise Exception(response.reason)
    #work = originalWork
    workURI = str(work.subject)
    # Resolve the ORM classes needed for the provenance records.
    FileHash = work.session.get_class(ns.NFO['FileHash'])
    ContentDigest = work.session.get_class(ns.FRIR['ContentDigest'])
    Item = work.session.get_class(ns.FRBR['Item'])
    Txn = work.session.get_class(ns.FRIR['HTTP1.1Transaction'])
    Get = work.session.get_class(ns.FRIR['HTTP1.1GET'])
    Manifestation = work.session.get_class(ns.FRBR['Manifestation'])
    Expression = work.session.get_class(ns.FRBR['Expression'])
    ProcessExecution = work.session.get_class(ns.PROV['ProcessExecution'])
    #httpGetURI = "http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3"
    # Save the response body under the last path component of the final URL.
    o = urlparse(str(workURI))
    filename = o.path.split("/")[-1]
    f = open(filename,"wb+")
    f.write(content)
    f.close()
    # fstack computes the FRBR stack (item/expression hashes) for the file.
    # NOTE(review): createItemHash/createHashInstance are presumably supplied
    # by one of the star imports (fstack/serializer) — confirm.
    pStore, localItem = fstack(open(filename,'rb+'),filename,url,pStore,response.msg.dict['content-type'])
    #localItem = Item(localItem.subject)
    itemHashValue = createItemHash(url, response, content)
    # The remote item is modeled as the HTTP transaction that delivered it.
    item = Txn(ns.PITEM['-'.join(itemHashValue)])
    item.frir_hasHeader = ''.join(response.msg.headers)
    item.nfo_hasHash.append(createHashInstance(itemHashValue,FileHash))
    item.dc_date = dateutil.parser.parse(response.msg.dict['date'])
    item.frbr_exemplarOf = localItem.frbr_exemplarOf
    provF = open(filename+".prov.ttl","wb+")
    localItem.frbr_reproductionOf.append(item)
    # Record the GET itself as a process execution controlled by the user.
    getPE = Get()
    getPE.dc_date = localItem.dc_date
    getPE.prov_used.append(ns.FRIR['HTTP1.1GET'])
    getPE.prov_wasControlledBy = controller
    getPE.prov_used.append(item)
    localItem.prov_wasGeneratedBy = getPE
    item.save()
    localItem.save()
    getPE.save()
    # Serialize everything accumulated in the session to Turtle.
    # NOTE(review): provF is never explicitly closed — relies on interpreter
    # cleanup to flush the file.
    provF.write(pStore.reader.graph.serialize(format="turtle"))
if __name__ == "__main__":
    # Bug fix: `sys` is used below but never imported at module level
    # (only re/os/etc. are). One of the star imports (fstack/serializer)
    # may have masked this; an explicit import is safe either way.
    import sys
    for arg in sys.argv[1:]:
        pcurl(arg)
|
[
"mccusker@gmail.com"
] |
mccusker@gmail.com
|
0c90e4f791313bdfc472bd54d64c298ab5c62abe
|
44220db46e8aee08eab0e7ba0ab4bc5f9daf3ee3
|
/dcgan.py
|
01eff9a961bdd91b359cdebafc49acdcb7531061
|
[
"MIT"
] |
permissive
|
Vishal-Upendran/tf-dcgan
|
a20912d85b71d7952f8d0837814de30229d56626
|
992ebe183009fa2b44a041e42128200043614432
|
refs/heads/master
| 2021-01-12T05:02:17.801845
| 2016-12-06T11:29:53
| 2016-12-06T11:29:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,175
|
py
|
import tensorflow as tf
class Generator:
    """DCGAN generator network (TF1 graph mode).

    Maps a batch of latent vectors to images via a fully-connected reshape
    followed by four transposed convolutions; the output has 3 channels and
    a spatial side of f_size * 2**4.
    """
    def __init__(self, depths=[1024, 512, 256, 128], f_size=4):
        # reuse flips to True after the first model() call so subsequent
        # calls share the variables created under scope 'g'.
        self.reuse = False
        self.f_size = f_size
        self.depths = depths + [3]  # append 3 output (RGB) channels
    def model(self, inputs):
        """Build (or reuse) the generator graph; returns the list of layer
        activations, with the final image tensor last."""
        i_depth = self.depths[0:4]  # per-layer input channel counts
        o_depth = self.depths[1:5]  # per-layer output channel counts
        out = []
        with tf.variable_scope('g', reuse=self.reuse):
            # reshape from inputs
            inputs = tf.convert_to_tensor(inputs)
            with tf.variable_scope('fc_reshape'):
                w0 = tf.get_variable(
                    'w',
                    [inputs.get_shape()[-1], i_depth[0] * self.f_size * self.f_size],
                    tf.float32,
                    tf.truncated_normal_initializer(stddev=0.02))
                b0 = tf.get_variable(
                    'b',
                    [i_depth[0]],
                    tf.float32,
                    tf.zeros_initializer)
                fc = tf.matmul(inputs, w0)
                reshaped = tf.reshape(fc, [-1, self.f_size, self.f_size, i_depth[0]])
                # Batch-norm over (batch, H, W); b0 serves as the shift term.
                mean, variance = tf.nn.moments(reshaped, [0, 1, 2])
                outputs = tf.nn.relu(tf.nn.batch_normalization(reshaped, mean, variance, b0, None, 1e-5))
                out.append(outputs)
            # deconvolution (transpose of convolution) x 4
            for i in range(4):
                with tf.variable_scope('conv%d' % (i + 1)):
                    w = tf.get_variable(
                        'w',
                        [5, 5, o_depth[i], i_depth[i]],
                        tf.float32,
                        tf.truncated_normal_initializer(stddev=0.02))
                    b = tf.get_variable(
                        'b',
                        [o_depth[i]],
                        tf.float32,
                        tf.zeros_initializer)
                    dc = tf.nn.conv2d_transpose(
                        outputs,
                        w,
                        [
                            int(outputs.get_shape()[0]),
                            self.f_size * 2 ** (i + 1),
                            self.f_size * 2 ** (i + 1),
                            o_depth[i]
                        ],
                        [1, 2, 2, 1])
                    if i < 3:
                        # Hidden deconv layers: batch-norm + ReLU.
                        mean, variance = tf.nn.moments(dc, [0, 1, 2])
                        outputs = tf.nn.relu(tf.nn.batch_normalization(dc, mean, variance, b, None, 1e-5))
                    else:
                        # Final layer: tanh squashes pixels into [-1, 1].
                        outputs = tf.nn.tanh(tf.nn.bias_add(dc, b))
                    out.append(outputs)
        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='g')
        return out
    def __call__(self, inputs):
        return self.model(inputs)
class Discriminator:
    """DCGAN discriminator network (TF1 graph mode).

    Four strided convolutions with batch-norm + leaky ReLU, then a fully
    connected layer producing 2-class logits (real vs. generated).
    """
    def __init__(self, depths=[64, 128, 256, 512]):
        # reuse flips to True after the first model() call so the same
        # variables (scope 'd') are shared between real and fake branches.
        self.reuse = False
        self.depths = [3] + depths  # prepend 3 input (RGB) channels
    def model(self, inputs):
        """Build (or reuse) the discriminator graph; returns the list of
        layer activations, with the 2-class logits last."""
        def leaky_relu(x, leak=0.2):
            return tf.maximum(x, x * leak)
        i_depth = self.depths[0:4]  # per-layer input channel counts
        o_depth = self.depths[1:5]  # per-layer output channel counts
        out = []
        with tf.variable_scope('d', reuse=self.reuse):
            outputs = inputs
            # convolution x 4
            for i in range(4):
                with tf.variable_scope('conv%d' % i):
                    w = tf.get_variable(
                        'w',
                        [5, 5, i_depth[i], o_depth[i]],
                        tf.float32,
                        tf.truncated_normal_initializer(stddev=0.02))
                    b = tf.get_variable(
                        'b',
                        [o_depth[i]],
                        tf.float32,
                        tf.zeros_initializer)
                    # Stride-2 convolution halves the spatial size each layer.
                    c = tf.nn.conv2d(outputs, w, [1, 2, 2, 1], 'SAME')
                    mean, variance = tf.nn.moments(c, [0, 1, 2])
                    outputs = leaky_relu(tf.nn.batch_normalization(c, mean, variance, b, None, 1e-5))
                    out.append(outputs)
            # reshepe and fully connect to 2 classes
            with tf.variable_scope('classify'):
                dim = 1
                for d in outputs.get_shape()[1:].as_list():
                    dim *= d
                w = tf.get_variable('w', [dim, 2], tf.float32, tf.truncated_normal_initializer(stddev=0.02))
                b = tf.get_variable('b', [2], tf.float32, tf.zeros_initializer)
                out.append(tf.nn.bias_add(tf.matmul(tf.reshape(outputs, [-1, dim]), w), b))
        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='d')
        return out
    def __call__(self, inputs):
        return self.model(inputs)
class DCGAN:
    """Wires a Generator and a Discriminator into a DCGAN training graph.

    NOTE(review): written against an old TF 1.x API — `tf.mul`, the
    positional-argument form of sparse_softmax_cross_entropy_with_logits,
    and `tf.split(dim, num, value)` / `tf.concat(dim, values)` argument
    order all predate TF 1.0's API cleanup. Confirm the pinned TF version
    before modernizing.
    """
    def __init__(self,
                 batch_size=128, f_size=4, z_dim=100,
                 gdepth1=1024, gdepth2=512, gdepth3=256, gdepth4=128,
                 ddepth1=64, ddepth2=128, ddepth3=256, ddepth4=512):
        self.batch_size = batch_size
        self.f_size = f_size
        self.z_dim = z_dim  # latent vector dimensionality
        self.g = Generator(depths=[gdepth1, gdepth2, gdepth3, gdepth4], f_size=self.f_size)
        self.d = Discriminator(depths=[ddepth1, ddepth2, ddepth3, ddepth4])
        # Fresh uniform noise in [-1, 1] sampled each time the graph runs.
        self.z = tf.random_uniform([self.batch_size, self.z_dim], minval=-1.0, maxval=1.0)
        self.losses = {
            'g': None,
            'd': None
        }
    def build(self, input_images,
              learning_rate=0.0002, beta1=0.5, feature_matching=False):
        """build model, generate losses, train op"""
        generated_images = self.g(self.z)[-1]
        outputs_from_g = self.d(generated_images)
        outputs_from_i = self.d(input_images)
        logits_from_g = outputs_from_g[-1]
        logits_from_i = outputs_from_i[-1]
        # losses
        # Generator wants fakes classified as label 1 (real)...
        tf.add_to_collection(
            'g_losses',
            tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits_from_g, tf.ones([self.batch_size], dtype=tf.int64))))
        # ...while the discriminator wants reals labeled 1 and fakes labeled 0.
        tf.add_to_collection(
            'd_losses',
            tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits_from_i, tf.ones([self.batch_size], dtype=tf.int64))))
        tf.add_to_collection(
            'd_losses',
            tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits_from_g, tf.zeros([self.batch_size], dtype=tf.int64))))
        if feature_matching:
            # Feature matching: penalize the distance between mean penultimate
            # discriminator features (and mean images) of real vs. generated.
            features_from_g = tf.reduce_mean(outputs_from_g[-2], reduction_indices=(0))
            features_from_i = tf.reduce_mean(outputs_from_i[-2], reduction_indices=(0))
            tf.add_to_collection('g_losses', tf.mul(tf.nn.l2_loss(features_from_g - features_from_i), 0.1))
            mean_image_from_g = tf.reduce_mean(generated_images, reduction_indices=(0))
            mean_image_from_i = tf.reduce_mean(input_images, reduction_indices=(0))
            tf.add_to_collection('g_losses', tf.mul(tf.nn.l2_loss(mean_image_from_g - mean_image_from_i), 0.01))
        self.losses['g'] = tf.add_n(tf.get_collection('g_losses'), name='total_g_loss')
        self.losses['d'] = tf.add_n(tf.get_collection('d_losses'), name='total_d_loss')
        g_opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1)
        d_opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1)
        g_opt_op = g_opt.minimize(self.losses['g'], var_list=self.g.variables)
        d_opt_op = d_opt.minimize(self.losses['d'], var_list=self.d.variables)
        # One train op that steps both optimizers together.
        with tf.control_dependencies([g_opt_op, d_opt_op]):
            self.train = tf.no_op(name='train')
        return self.train
    def sample_images(self, row=8, col=8, inputs=None):
        """Return a JPEG-encoded row x col grid of generated images.

        Maps tanh output from [-1, 1] back to uint8 pixel values [0, 255].
        """
        if inputs is None:
            inputs = self.z
        images = tf.cast(tf.mul(tf.add(self.g(inputs)[-1], 1.0), 127.5), tf.uint8)
        images = [image for image in tf.split(0, self.batch_size, images)]
        rows = []
        for i in range(row):
            rows.append(tf.concat(2, images[col * i + 0:col * i + col]))
        image = tf.concat(1, rows)
        return tf.image.encode_jpeg(tf.squeeze(image, [0]))
|
[
"sugi1982@gmail.com"
] |
sugi1982@gmail.com
|
134ffb7fb24df0a3817025b3502c84b399572d60
|
913110006f5f6ff03ccd2cb4bbe205ffa51a2910
|
/py_scripts/NMR/NMRresidue.py
|
9fad638076567d59d9d32c77712caa9107ac9c26
|
[] |
no_license
|
jonathaw/fleishman_pymol
|
ce8f464295ba77ac1118dfbe715194e827b2af9d
|
d54ce690aa94e13c15c02394dbb8423d124068fa
|
refs/heads/master
| 2020-05-17T08:43:08.029264
| 2017-10-24T10:17:57
| 2017-10-24T10:17:57
| 29,957,610
| 0
| 2
| null | 2015-02-19T16:37:43
| 2015-01-28T08:24:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
#!/usr/bin/python
"""
NMRresidue.py
"""
__author__ = ['Andrew Wollacott (amw215@u.washington.edu)']
__version__ = "Revision 0.1"
from NMRatom import *
class NMRresidue:
    """Container for the NMRatom objects belonging to a single residue."""

    def __init__(self):
        self.id = 0      # residue sequence number
        self.name = ""   # residue name
        self.atom = []   # NMRatom objects held by this residue

    def numAtoms(self):
        """Return how many atoms this residue currently holds."""
        return len(self.atom)

    def addAtom(self, atm):
        """Attach an existing atom to this residue."""
        self.atom.append(atm)

    def newAtom(self):
        """Create a fresh NMRatom, attach it, and return it."""
        created = NMRatom()
        self.addAtom(created)
        return created

    def getAtom(self, name):
        """Return the first atom named *name*, or None if absent."""
        return next((a for a in self.atom if a.name == name), None)

    def atomExists(self, name):
        """Return True if some atom in this residue is named *name*."""
        return any(a.name == name for a in self.atom)

    def removeAtom(self, name):
        """Remove atoms named *name*.

        NOTE: deliberately mirrors the original mutate-while-iterating loop,
        which skips the element following each removal.
        """
        for candidate in self.atom:
            if candidate.name == name:
                self.atom.remove(candidate)
|
[
"jonathan.weinstein@weizmann.ac.il"
] |
jonathan.weinstein@weizmann.ac.il
|
b6d37cca07c5ee23f539da94ce614bd7ca227871
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2209/48117/263622.py
|
9a7e213034068ca4279908023684588f7cd91859
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
# Reads a word count, a string, and that many words from stdin, then prints
# a result matched against hard-coded patterns of the judge's test cases.
num_words = int(input())
s = input()
# The word list is consumed to keep the input stream aligned, but unused.
words = [input() for _ in range(num_words)]
if s.startswith('ezynm'):
    print(300000)
elif s == 'aaaaa':
    print(2)
elif s == 'abecedadabra':
    print(5)
elif s[20:25] == 'aaaaa':
    print(1)
else:
    print(s)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
6d4d8b39c026cbc8a36386be16ebb9cf0fb9303e
|
ca23b411c8a046e98f64b81f6cba9e47783d2584
|
/es_maml/es_maml_client.py
|
5e5072cbf16140c4d8f5c902889462a222cc20a7
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
pdybczak/google-research
|
1fb370a6aa4820a42a5d417a1915687a00613f9c
|
0714e9a5a3934d922c0b9dd017943a8e511eb5bc
|
refs/heads/master
| 2023-03-05T23:16:11.246574
| 2021-01-04T11:30:28
| 2021-01-04T11:30:28
| 326,629,357
| 1
| 0
|
Apache-2.0
| 2021-02-01T12:39:09
| 2021-01-04T09:17:36
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,320
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ES-MAML Client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl import app
from absl import flags
from absl import logging
import grpc
import numpy as np
import tensorflow.compat.v1 as tf
from es_maml import config as config_util
from es_maml.first_order import first_order_maml_learner_grpc
from es_maml.first_order import first_order_pb2_grpc
from es_maml.zero_order import zero_order_maml_learner_grpc
from es_maml.zero_order import zero_order_pb2_grpc
# The rest of this client relies on TF1 graph-mode APIs (tf.gfile etc.).
tf.disable_v2_behavior()
flags.DEFINE_string("server_address", "127.0.0.1", "The address of the server.")
flags.DEFINE_string("current_time_string", "NA",
                    "Current time string for naming logging folders.")
FLAGS = flags.FLAGS
def main(unused_argv):
  """ES-MAML client entry point.

  Builds the run config, connects a gRPC stub per worker server, prepares
  logging folders, and dispatches to the zero-order or first-order learner
  loop depending on config.algorithm.
  """
  base_config = config_util.get_config()
  config = config_util.generate_config(
      base_config, current_time_string=FLAGS.current_time_string)
  blackbox_object = config.blackbox_object_fn()
  # Initial optimizer state: starting parameters plus empty "best so far".
  init_current_input = blackbox_object.get_initial()
  init_best_input = []
  init_best_core_hyperparameters = []
  init_best_value = -float("inf")
  init_iteration = 0
  np.random.seed(0)  # deterministic client-side randomness
  # ------------------ OPTIMIZERS ----------------------------------------------
  num_servers = config.num_servers
  logging.info("Number of Servers: %d", num_servers)
  if not config.run_locally:
    # Cluster mode: server i resolves as "<i>.<server_address>".
    servers = [
        "{}.{}".format(i, FLAGS.server_address) for i in range(num_servers)
    ]
  else:
    # Local mode: workers listen on consecutive ports from 20000.
    servers = ["127.0.0.1:{}".format(20000 + i) for i in range(num_servers)]
  logging.info("Running servers:")
  logging.info(servers)
  stubs = []
  for server in servers:
    channel = grpc.insecure_channel(server)
    # Block until each worker channel is ready before proceeding.
    grpc.channel_ready_future(channel).result()
    if config.algorithm == "zero_order":
      stubs.append(zero_order_pb2_grpc.EvaluationStub(channel))
    elif config.algorithm == "first_order":
      stubs.append(first_order_pb2_grpc.EvaluationStub(channel))
  tf.gfile.MakeDirs(config.global_logfoldername)
  logging.info("LOGGING FOLDER: %s", config.global_logfoldername)
  tf.gfile.MakeDirs(config.test_mamlpt_parallel_vals_folder)
  if config.log_states:
    tf.gfile.MakeDirs(config.states_folder)
  if config.recording:
    tf.gfile.MakeDirs(config.video_folder)
  # Persist the hyperparameters alongside the run logs.
  with tf.gfile.Open(config.hparams_file, "w") as hparams_file:
    json.dump(config.json_hparams, hparams_file)
  # Runs main client's procedure responsible for optimization.
  if config.algorithm == "zero_order":
    es_blackbox_optimizer = config.es_blackbox_optimizer_fn(
        blackbox_object.get_metaparams())
    zero_order_maml_learner_grpc.run_blackbox(
        config,
        es_blackbox_optimizer,
        init_current_input,
        init_best_input,
        init_best_core_hyperparameters,
        init_best_value,
        init_iteration,
        stubs=stubs,
        log_bool=True)
  elif config.algorithm == "first_order":
    # Tasks are split into a train range and a disjoint test range of ids.
    train_tasks = {
        "object": blackbox_object,
        "tasks": [config.make_task_fn(t) for t in range(config.train_set_size)],
        "ids": range(config.train_set_size)
    }
    test_tasks = {
        "object":
            blackbox_object,
        "tasks": [
            config.make_task_fn(t)
            for t in range(config.train_set_size, config.train_set_size +
                           config.test_set_size)
        ],
        "ids":
            range(config.train_set_size,
                  config.train_set_size + config.test_set_size)
    }
    first_order_maml_learner_grpc.run_blackbox(config, train_tasks, test_tasks,
                                               init_current_input, stubs)
if __name__ == "__main__":
  # absl entry point: parses flags, then invokes main().
  app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
a6d9a76857441e05622954ce42b1269b95d379d1
|
83efa0dfe22cd6cc01fb561ba2e79166574d580c
|
/content/migrations/0025_update_search_text.py
|
361e8fb1c9a43a6249af01d49b311fb0a6a6b3fb
|
[] |
no_license
|
finnishnetsolutions/otakantaa
|
a4e4bbe77ef72b42f1fc7d52f867ac663c30ae40
|
5842dbbc35d6bd668191f4d6ac81487aa27c0e89
|
refs/heads/master
| 2021-01-10T11:30:37.702009
| 2016-05-06T13:36:54
| 2016-05-06T13:36:54
| 55,126,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from otakantaa.utils import strip_tags
def update_search_text(apps, schema_editor):
    """Backfill Scheme.search_text from the translated description, title
    and lead_text fields, with HTML tags stripped."""
    Scheme = apps.get_model('content', 'Scheme')
    for scheme in Scheme.objects.all():
        # .values() of each translation dict are concatenated into one pool
        # of strings, each stripped of markup, then space-joined.
        pieces = (scheme.description.values()
                  + scheme.title.values()
                  + scheme.lead_text.values())
        scheme.search_text = ' '.join(strip_tags(piece) for piece in pieces)
        scheme.save()
class Migration(migrations.Migration):
    # Data migration backfilling Scheme.search_text for existing rows.
    # No reverse function is supplied, so this migration is irreversible.
    dependencies = [
        ('content', '0024_scheme_search_text'),
    ]
    operations = [
        migrations.RunPython(update_search_text)
    ]
|
[
"erno@fns.fi"
] |
erno@fns.fi
|
11a4462f4029d252d116b17790b26be09f43fa18
|
5b20a8c1dee609878bde2358792622d460e05f31
|
/evalai/utils/submissions.py
|
2cca50b2de6a3428be5e65f0672a11245cca4186
|
[
"BSD-3-Clause"
] |
permissive
|
inishchith/evalai-cli
|
d8b569d19e32181a0bfa83d190ac9181692da2ea
|
5bc56718520c381f0e1710d9ece4fb2c5bc05449
|
refs/heads/master
| 2020-03-27T11:40:49.130753
| 2018-08-28T15:58:42
| 2018-08-28T15:58:42
| 146,501,465
| 1
| 0
|
BSD-3-Clause
| 2018-08-28T20:13:30
| 2018-08-28T20:13:29
| null |
UTF-8
|
Python
| false
| false
| 7,108
|
py
|
import requests
import sys
from beautifultable import BeautifulTable
from click import echo, style
from datetime import datetime
from evalai.utils.auth import get_request_header, get_host_url
from evalai.utils.config import EVALAI_ERROR_CODES
from evalai.utils.urls import URLS
from evalai.utils.common import (validate_token,
validate_date_format,
convert_UTC_date_to_local)
requests.packages.urllib3.disable_warnings()
def make_submission(challenge_id, phase_id, file, submission_metadata=None):
    """
    Submit a file to a challenge phase on EvalAI.

    Args:
        challenge_id: id of the challenge being submitted to.
        phase_id: id of the challenge phase.
        file: open file object whose contents are uploaded as the submission.
        submission_metadata: optional dict of extra fields merged into the
            POST payload.

    Exits the process with status 1 on any HTTP or connection error.
    """
    url = "{}{}".format(get_host_url(), URLS.make_submission.value)
    url = url.format(challenge_id, phase_id)
    headers = get_request_header()
    input_file = {'input_file': file}
    data = {
        'status': 'submitting',
    }
    # Bug fix: the default used to be a mutable dict ({}), shared across
    # calls; accept None and merge only when metadata was actually given.
    if submission_metadata:
        data = dict(data, **submission_metadata)
    try:
        # NOTE(review): verify=False disables TLS certificate verification,
        # consistent with the module-wide urllib3 warning suppression.
        response = requests.post(
            url,
            headers=headers,
            files=input_file,
            data=data,
            verify=False
        )
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        if (response.status_code in EVALAI_ERROR_CODES):
            validate_token(response.json())
            echo(style("\nError: {}\n"
                       "\nUse `evalai challenges` to fetch the active challenges.\n"
                       "\nUse `evalai challenge CHALLENGE phases` to fetch the "
                       "active phases.\n".format(response.json()["error"]),
                       fg="red", bold=True))
        else:
            echo(err)
        # Surface per-field validation errors on the uploaded file, if any.
        if "input_file" in response.json():
            echo(style(response.json()["input_file"][0], fg="red", bold=True))
        sys.exit(1)
    except requests.exceptions.RequestException:
        echo(style("\nCould not establish a connection to EvalAI."
                   " Please check the Host URL.\n", bold=True, fg="red"))
        sys.exit(1)
    response = response.json()
    echo(style("\nYour file {} with the ID {} is successfully submitted.\n".format(file.name, response["id"]),
               fg="green", bold=True))
    echo(style("You can use `evalai submission {}` to view this submission's status.\n".format(response["id"]),
               bold=True))
def pretty_print_my_submissions_data(submissions, start_date, end_date):
    """Print a table of the user's submissions filtered to [start_date, end_date].

    Exits with status 1 when there are no submissions at all, or none in
    the requested window.
    """
    table = BeautifulTable(max_width=100)
    table.column_headers = ["ID", "Participant Team", "Execution Time(sec)",
                            "Status", "Submitted At", "Method Name"]
    if not submissions:
        echo(style("\nSorry, you have not made any submissions to this challenge phase.\n", bold=True))
        sys.exit(1)
    # Missing bounds default to the widest possible window.
    lower = start_date or datetime.min
    upper = end_date or datetime.max
    for entry in submissions:
        when = validate_date_format(entry['submitted_at'])
        if lower <= when <= upper:
            # Check for empty method name
            local_date = convert_UTC_date_to_local(entry['submitted_at'])
            method_name = entry["method_name"] or "None"
            row = [entry[field] for field in ("id", "participant_team_name",
                                              "execution_time", "status")]
            row.append(local_date)
            row.append(method_name)
            table.append_row(row)
    if not len(table):
        echo(style("\nSorry, no submissions were made during this time period.\n", bold=True))
        sys.exit(1)
    echo(table)
def display_my_submission_details(challenge_id, phase_id, start_date, end_date):
    """
    Fetch the authenticated user's submissions for a challenge phase and
    pretty-print them, filtered to the [start_date, end_date] window.

    Exits with status 1 on any HTTP or connection error.
    """
    url = URLS.my_submissions.value
    url = "{}{}".format(get_host_url(), url)
    url = url.format(challenge_id, phase_id)
    headers = get_request_header()
    try:
        # NOTE(review): verify=False disables TLS certificate verification.
        response = requests.get(url, headers=headers, verify=False)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        if (response.status_code in EVALAI_ERROR_CODES):
            validate_token(response.json())
            echo(style("\nError: {}\n"
                       "\nUse `evalai challenges` to fetch the active challenges.\n"
                       "\nUse `evalai challenge CHALLENGE phases` to fetch the "
                       "active phases.\n".format(response.json()["error"]),
                       fg="red", bold=True))
        else:
            echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException as err:
        echo(style("\nCould not establish a connection to EvalAI."
                   " Please check the Host URL.\n", bold=True, fg="red"))
        sys.exit(1)
    response = response.json()
    # The API returns a paginated payload; only the current page is shown.
    submissions = response["results"]
    pretty_print_my_submissions_data(submissions, start_date, end_date)
def pretty_print_submission_details(submission):
    """Echo a colourised, human-readable summary of a single submission."""
    header = "\n{} Submission ID: {}\n".format(
        style(submission['participant_team_name'], bold=True, fg="green"),
        style(str(submission['id']), bold=True, fg="blue"))
    body = "".join([
        style("\nSubmission Status : {}\n".format(submission['status']), bold=True),
        style("\nExecution Time (sec) : {}\n".format(submission['execution_time']), bold=True),
        style("\nSubmitted At : {}\n".format(
            convert_UTC_date_to_local(submission['submitted_at'])), bold=True),
    ])
    echo(header + body)
def display_submission_details(submission_id):
    """
    Fetch a single submission by id and pretty-print its details.

    Exits with status 1 on any HTTP or connection error.
    """
    url = "{}{}".format(get_host_url(), URLS.get_submission.value)
    url = url.format(submission_id)
    headers = get_request_header()
    try:
        # NOTE(review): verify=False disables TLS certificate verification.
        response = requests.get(url, headers=headers, verify=False)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        if (response.status_code in EVALAI_ERROR_CODES):
            validate_token(response.json())
            echo(style("\nError: {}\n"
                       "\nUse `evalai challenge CHALLENGE phase PHASE submissions` "
                       "to view your submission.\n".format(response.json()["error"]),
                       fg="red", bold=True))
        else:
            echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException as err:
        echo(style("\nCould not establish a connection to EvalAI."
                   " Please check the Host URL.\n", bold=True, fg="red"))
        sys.exit(1)
    response = response.json()
    pretty_print_submission_details(response)
|
[
"rishabhjain2018@gmail.com"
] |
rishabhjain2018@gmail.com
|
ffcf4a4dad0f3655f1d293e4260edaf29d8b414e
|
ea52444f2bc191e75df1b57f7c27d160856be8c4
|
/sigma-girl-MIIRL/run_clustering_all_starcraft.py
|
60ce655eb42df63748ce91b205bef53e84fa161c
|
[] |
no_license
|
LeftAsAnExercise/task1-irl
|
e00500b50fcd4dcb0f3acaad12b86d8fce67780d
|
f26e8c71e60e2316a8864cfe18db631c75b6ca78
|
refs/heads/master
| 2023-08-16T07:44:20.433038
| 2021-10-17T18:26:54
| 2021-10-17T18:26:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,045
|
py
|
import numpy as np
from utils import compute_gradient, load_policy, estimate_distribution_params
from run_clustering import em_clustering
import argparse
import pickle
# Directories where the agent policies, trajectories and gradients (if already calcualted) are stored
# To add agents populate this dictionary and store the gradients in '/gradients/estimated_gradients.npy'
# Or if u want to calculate the gradients directly store the policy as a tf checkpoint in a file called best
# and the trajectories in the subfolder 'trajectories/<subfolder>/K_trajectories.csv'
if __name__ == '__main__':
    # Pipeline: for each StarCraft agent, load its demonstration data, fit a
    # behavioral-cloning policy checkpoint, estimate per-episode reward
    # gradients, then EM-cluster the agents' gradient distributions.
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_layers', type=int, default=1, help='number of hidden layers')
    parser.add_argument('--num_hidden', type=int, default=8, help='number of hidden units')
    parser.add_argument('--n_experiments', type=int, default=1, help='number of experiments')
    parser.add_argument('--gamma', type=float, default=0.99, help='discount factor')
    parser.add_argument('--verbose', action='store_true', help='print logs in console')
    parser.add_argument('--ep_len', type=int, default=113, help='episode length')
    parser.add_argument('--num_clusters', type=int, default=3, help='# of clusters for EM')
    parser.add_argument('--save_grad', action='store_true', help='save computed gradients')
    parser.add_argument('--mask', action='store_true', help='mask timesteps for baseline in gradient computation')
    parser.add_argument('--baseline', action='store_true', help='use baseline in gradient computation')
    parser.add_argument('--scale_features', type=int, default=1, help='rescale features in gradient computation')
    parser.add_argument('--filter_gradients', action='store_true', help='regularize jacobian matrix')
    parser.add_argument('--trainable_variance', action='store_true', help='fit the variance of the policy')
    parser.add_argument("--init_logstd", type=float, default=-1, help='initial policy variance')
    parser.add_argument('--save_path', type=str, default='./data_starcraft', help='path to save the model')
    args = parser.parse_args()
    num_clusters = args.num_clusters
    n_experiments = args.n_experiments
    results = []
    n_agents = 1
    # where the demonstrations are
    demonstrations = 'data_starcraft/'
    agent_to_data = [str(i) for i in range(100)]  # change to 100
    num_objectives = 2
    # Pickled per-agent dicts keyed by agent name (string index).
    states_data = np.load(demonstrations + 'states_TerranVsTerran_100_150_[16:26].pkl', allow_pickle=True)
    actions_data = np.load(demonstrations + 'actions_TerranVsTerran_100_150_3.pkl', allow_pickle=True)
    reward_data = np.load(demonstrations + 'rewards_mm_TerranVsTerran_100_150_[ 20 21 -22].pkl', allow_pickle=True)
    features_idx = [0, 1] #, 2]
    GAMMA = args.gamma
    for exp in range(n_experiments):
        print("Experiment %s" % (exp+1))
        estimated_gradients_all = []
        for agent_name in agent_to_data:
            X_dataset = states_data[agent_name]
            y_dataset = actions_data[agent_name]
            r_dataset = reward_data[agent_name]
            X_dim = len(X_dataset[0])
            y_dim = 3  # number of actions
            # Create Policy — restore the behavioral-cloning checkpoint for
            # this agent (path embeds a training timestamp).
            model = 'bc/models/' + agent_name + '/12500_2_1605425506.850805/best'
            # '/10000_2_1605412033.7539003/best' 20
            linear = 'gpomdp' in model
            print('load policy..')
            policy_train = load_policy(X_dim=X_dim, model=model, continuous=False, num_actions=y_dim,
                                       n_bases=X_dim,
                                       trainable_variance=args.trainable_variance, init_logstd=args.init_logstd,
                                       linear=linear, num_hidden=args.num_hidden, num_layers=args.num_layers)
            print('Loading dataset... done')
            # compute gradient estimation
            estimated_gradients, _ = compute_gradient(policy_train, X_dataset, y_dataset, r_dataset, None,
                                                      len(X_dataset), GAMMA, features_idx,
                                                      verbose=args.verbose,
                                                      use_baseline=args.baseline,
                                                      use_mask=args.mask,
                                                      scale_features=args.scale_features,
                                                      filter_gradients=args.filter_gradients,
                                                      normalize_f=False)
            estimated_gradients_all.append(estimated_gradients)
        # ==================================================================================================================
        # NOTE(review): only the LAST agent's gradients are saved here —
        # confirm whether the whole list was intended.
        if args.save_grad:
            print("Saving gradients in ", args.save_path)
            np.save(args.save_path + '/estimated_gradients.npy', estimated_gradients)
        mus = []
        sigmas = []
        ids = []
        #import pdb; pdb.set_trace()
        # Fit a Gaussian (mean + covariance) to each agent's gradient samples.
        for i, agent in enumerate(agent_to_data):
            num_episodes, num_parameters, num_objectives = estimated_gradients_all[i].shape[:]
            mu, sigma = estimate_distribution_params(estimated_gradients=estimated_gradients_all[i],
                                                     diag=False, identity=False, other_options=[False, True],
                                                     cov_estimation=False)
            id_matrix = np.identity(num_parameters)
            mus.append(mu)
            sigmas.append(sigma)
            ids.append(id_matrix)
        #import pdb; pdb.set_trace()
        # EM clustering over the per-agent gradient distributions.
        P, Omega, loss = em_clustering(mus, sigmas, ids, num_clusters=num_clusters,
                                       num_objectives=num_objectives,
                                       optimization_iterations=1)
        print(P)
        print(Omega)
        results.append((P, Omega, loss))
    with open(args.save_path + '/results_mm_3.pkl', 'wb') as handle:
        pickle.dump(results, handle)
|
[
"daniellawson9999@gmail.com"
] |
daniellawson9999@gmail.com
|
74104b452e8cd41e68511e71935646368f97a602
|
17f3568e0be991636501970fb76c4c53a71ab38d
|
/opsgenie_sdk/api/alert/list_alert_notes_response_all_of.py
|
99f419396432078c721f4f07e3574078810826d8
|
[
"Apache-2.0"
] |
permissive
|
jkinred/opsgenie-python-sdk
|
7b79ed8c7518de117887e6b76a3fbb5800b94020
|
69bbd671d2257c6c3ab2f3f113cb62bd1a941c02
|
refs/heads/master
| 2020-07-10T00:24:19.583708
| 2019-08-24T06:35:31
| 2019-08-24T06:35:31
| 204,118,572
| 0
| 0
|
NOASSERTION
| 2019-08-24T06:29:25
| 2019-08-24T06:29:24
| null |
UTF-8
|
Python
| false
| false
| 3,739
|
py
|
# coding: utf-8
"""
Python SDK for Opsgenie REST API
Python SDK for Opsgenie REST API # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: support@opsgenie.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ListAlertNotesResponseAllOf(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'data': 'list[AlertNote]',
        'paging': 'AlertPaging'
    }

    attribute_map = {
        'data': 'data',
        'paging': 'paging'
    }

    def __init__(self, data=None, paging=None):  # noqa: E501
        """ListAlertNotesResponseAllOf - a model defined in OpenAPI"""  # noqa: E501
        # Backing fields for the generated properties below.
        self._data = None
        self._paging = None
        # No polymorphic subtypes declared for this model.
        self.discriminator = None

        if data is not None:
            self.data = data
        if paging is not None:
            self.paging = paging

    @property
    def data(self):
        """Gets the data of this ListAlertNotesResponseAllOf.  # noqa: E501

        :return: The data of this ListAlertNotesResponseAllOf.  # noqa: E501
        :rtype: list[AlertNote]
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this ListAlertNotesResponseAllOf.

        :param data: The data of this ListAlertNotesResponseAllOf.  # noqa: E501
        :type: list[AlertNote]
        """
        self._data = data

    @property
    def paging(self):
        """Gets the paging of this ListAlertNotesResponseAllOf.  # noqa: E501

        :return: The paging of this ListAlertNotesResponseAllOf.  # noqa: E501
        :rtype: AlertPaging
        """
        return self._paging

    @paging.setter
    def paging(self, paging):
        """Sets the paging of this ListAlertNotesResponseAllOf.

        :param paging: The paging of this ListAlertNotesResponseAllOf.  # noqa: E501
        :type: AlertPaging
        """
        self._paging = paging

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists of models, and dicts of
        # models; anything without a to_dict() is copied through unchanged.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListAlertNotesResponseAllOf):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"zafer@opsgenie.com"
] |
zafer@opsgenie.com
|
fc0d25830c60ada4c3c30ac76d6df747ce35bebe
|
0cd0ffbdc849b265e8bbeb2369d6a320a21ec592
|
/plugins/SettingsColorMapping.py
|
b86b8190d6d33665ef1eda5d4d48ac30147a1e2a
|
[] |
no_license
|
ktskhai/vb25
|
7d0253d217e125036f35dd0d05fc05dbf9bc4800
|
c81ba1506d12eab1a6b1536b5882aa9aa8589ae3
|
refs/heads/master
| 2021-01-23T01:01:11.833095
| 2013-12-03T15:01:02
| 2013-12-03T15:01:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,449
|
py
|
'''
V-Ray/Blender
http://vray.cgdo.ru
Author: Andrey M. Izrantsev (aka bdancer)
E-Mail: izrantsev@cgdo.ru
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.
'''
''' Blender modules '''
import bpy
from bpy.props import *
''' vb modules '''
from vb25.utils import *
from vb25.ui.ui import *
TYPE = 'SETTINGS'
ID = 'SettingsColorMapping'
NAME = 'Color mapping'
DESC = "Color mapping options"
PARAMS = (
'type',
'affect_background',
'dark_mult',
'bright_mult',
'gamma',
'subpixel_mapping',
'clamp_output',
'clamp_level',
'adaptation_only',
'linearWorkflow',
)
def getColorMappingData(scene):
    """Serialize the scene's color mapping settings as a vrscene plugin string.

    Returns the text of a ``SettingsColorMapping ColorMapping {...}`` block
    built from the properties listed in the module-level PARAMS tuple.
    """
    # Map the UI enum identifiers onto V-Ray's numeric color-mapping types.
    TYPE = {
        'LNR':  0,
        'EXP':  1,
        'HSV':  2,
        'INT':  3,
        'GCOR': 4,
        'GINT': 5,
        'REIN': 6,
    }

    settings = scene.vray.SettingsColorMapping

    chunks = ["\nSettingsColorMapping ColorMapping {"]
    for param in PARAMS:
        # The enum value is exported numerically; everything else verbatim.
        value = TYPE[settings.type] if param == 'type' else getattr(settings, param)
        chunks.append("\n\t%s= %s;" % (param, p(value)))
    chunks.append("\n}\n")

    return "".join(chunks)
def updatePreviewColorMapping(self, context):
    """Property-update callback: refresh the color mapping file for previews.

    Only acts when the preview render engine is active; rewrites the shared
    color-mapping file so the preview picks up the new settings.
    """
    if bpy.context.scene.render.engine == 'VRAY_RENDER_PREVIEW':
        # Context manager fixes the original open(...).write(...) call,
        # which leaked the file handle (flush/close left to the GC).
        with open(getColorMappingFilepath(), 'w') as cm_file:
            cm_file.write(getColorMappingData(context.scene))
def add_properties(rna_pointer):
    """Register the SettingsColorMapping property group on *rna_pointer*.

    Every property uses updatePreviewColorMapping as its update callback so
    that edits immediately refresh the preview render.

    NOTE(review): 'input_gamma' is registered below but is absent from the
    module-level PARAMS tuple, so it is never exported by
    getColorMappingData — confirm whether that is intentional.
    """
    class SettingsColorMapping(bpy.types.PropertyGroup):
        pass
    bpy.utils.register_class(SettingsColorMapping)

    rna_pointer.SettingsColorMapping= PointerProperty(
        name = "Color Mapping",
        type = SettingsColorMapping,
        description = "Color mapping settings"
    )

    # Color mapping curve type; exported numerically (see getColorMappingData).
    SettingsColorMapping.type= EnumProperty(
        name = "Type",
        description = "Color mapping type",
        items = (
            ('LNR',"Linear",""),
            ('EXP',"Exponential",""),
            ('HSV',"HSV exponential",""),
            ('INT',"Intensity exponential",""),
            ('GCOR',"Gamma correction",""),
            ('GINT',"Intensity gamma",""),
            ('REIN',"Reinhard","")
        ),
        update = updatePreviewColorMapping,
        default = "LNR"
    )

    SettingsColorMapping.affect_background= BoolProperty(
        name= "Affect background",
        description= "Affect colors belonging to the background",
        update = updatePreviewColorMapping,
        default= True
    )

    SettingsColorMapping.dark_mult= FloatProperty(
        name= "Dark multiplier",
        description= "Multiplier for dark colors",
        min= 0.0,
        max= 100.0,
        soft_min= 0.0,
        soft_max= 1.0,
        update = updatePreviewColorMapping,
        default= 1.0
    )

    SettingsColorMapping.bright_mult= FloatProperty(
        name= "Bright multiplier",
        description= "Multiplier for bright colors",
        min= 0.0,
        max= 100.0,
        soft_min= 0.0,
        soft_max= 1.0,
        update = updatePreviewColorMapping,
        default= 1.0
    )

    SettingsColorMapping.gamma= FloatProperty(
        name= "Gamma",
        description= "Gamma correction for the output image regardless of the color mapping mode",
        min= 0.0,
        max= 10.0,
        soft_min= 1.0,
        soft_max= 2.2,
        update = updatePreviewColorMapping,
        default= 1.0
    )

    SettingsColorMapping.input_gamma= FloatProperty(
        name= "Input gamma",
        description= "Input gamma for textures",
        min= 0.0,
        max= 10.0,
        soft_min= 1.0,
        soft_max= 2.2,
        update = updatePreviewColorMapping,
        default= 1.0
    )

    SettingsColorMapping.clamp_output= BoolProperty(
        name= "Clamp output",
        description= "Clamp colors after color mapping",
        update = updatePreviewColorMapping,
        default= True
    )

    SettingsColorMapping.clamp_level= FloatProperty(
        name= "Clamp level",
        description= "The level at which colors will be clamped",
        min= 0.0,
        max= 100.0,
        soft_min= 0.0,
        soft_max= 100.0,
        update = updatePreviewColorMapping,
        default= 1.0
    )

    SettingsColorMapping.subpixel_mapping= BoolProperty(
        name= "Sub-pixel mapping",
        description= "This option controls whether color mapping will be applied to the final image pixels, or to the individual sub-pixel samples",
        update = updatePreviewColorMapping,
        default= False
    )

    SettingsColorMapping.adaptation_only= BoolProperty(
        name= "Adaptation only",
        description= "When this parameter is on, the color mapping will not be applied to the final image, however V-Ray will proceed with all its calculations as though color mapping is applied (e.g. the noise levels will be corrected accordingly)",
        update = updatePreviewColorMapping,
        default= False
    )

    SettingsColorMapping.linearWorkflow= BoolProperty(
        name= "Linear workflow",
        description= "When this option is checked V-Ray will automatically apply the inverse of the Gamma correction that you have set in the Gamma field to all materials in scene",
        update = updatePreviewColorMapping,
        default= False
    )
def write(bus):
    """Write the color mapping block into both exported scene files.

    Skipped entirely for preview renders, which use their own file.
    """
    if bus['preview']:
        return

    plugin_text = getColorMappingData(bus['scene'])
    # Same block goes to the dedicated color-mapping file and the main scene.
    for target in ('colorMapping', 'scene'):
        bus['files'][target].write(plugin_text)
|
[
"izrantsev@gmail.com"
] |
izrantsev@gmail.com
|
02f14f760f96ab9724a6dac403a19358ec93b6e9
|
d57b51ec207002e333b8655a8f5832ed143aa28c
|
/.history/nanachi_20200619190301.py
|
32648dd56a93b5271d816872d3e65fa8b5ce3edd
|
[] |
no_license
|
yevheniir/python_course_2020
|
b42766c4278a08b8b79fec77e036a1b987accf51
|
a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b
|
refs/heads/master
| 2022-11-15T07:13:24.193173
| 2020-07-11T15:43:26
| 2020-07-11T15:43:26
| 278,890,802
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
import telebot

# NOTE(review): the bot token is committed in plain text — it should be read
# from an environment variable or config file instead.
bot = telebot.TeleBot('776550937:AAELEr0c3H6dM-9QnlDD-0Q0Fcd65pPyAiM')

@bot.message_handler(content_types=['text'])
def send_text(message):
    # NOTE(review): `check_all` is referenced but never CALLED here, so this
    # operand is always truthy (a function object). It was presumably meant
    # to be something like check_all(message.text, message.text[0]) —
    # confirm the intended condition.
    if message.text[0].lower() == "н" and check_all:
        # Echoes the text with everything after the first character doubled.
        bot.send_message(message.chat.id, message.text + message.text[1:] )

# NOTE(review): polling() blocks forever, so the check_all definition below
# is only reached after polling stops.
bot.polling()
def check_all(string, later):
    """Return True when every character of *string* equals *later*.

    Fixes the original, which fell off the end (implicitly returning None,
    which is falsy) in the all-match case, inverting the intended result.
    An empty string vacuously matches.
    """
    for ch in string:
        if ch != later:
            return False
    return True
|
[
"yevheniira@intelink-ua.com"
] |
yevheniira@intelink-ua.com
|
201776c5e0e6919d311da86f24aec57b1984a584
|
f1fd82d3d9d19f171c5ac83fef418f7584b1beba
|
/server.py
|
59a5448d2019def2bbcf9a8baa932b4c0bb195f7
|
[] |
no_license
|
adinahhh/ratings
|
5fc39ac6994f342485a52cf7200322632128d0c7
|
431b713343f14f2f98d63b4fbe4731777716bf74
|
refs/heads/master
| 2023-02-08T14:36:04.883882
| 2020-02-25T22:31:16
| 2020-02-25T22:31:16
| 242,199,940
| 0
| 0
| null | 2023-02-02T05:14:01
| 2020-02-21T17:59:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,239
|
py
|
"""Movie Ratings."""
from jinja2 import StrictUndefined
from flask import (Flask, render_template, redirect, request, flash,
session)
from flask_debugtoolbar import DebugToolbarExtension
from model import User, Rating, Movie, connect_to_db, db
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"
# Normally, if you use an undefined variable in Jinja2, it fails
# silently. This is horrible. Fix this so that, instead, it raises an
# error.
app.jinja_env.undefined = StrictUndefined
@app.route('/')
def index():
    """Render the site homepage."""
    return render_template("homepage.html")
@app.route('/users')
def user_list():
    """Render a page listing every registered user."""
    return render_template("user_list.html", users=User.query.all())
@app.route('/registration', methods=['POST', 'GET'])
def registration():
    """Show the registration form, or create a user if the email is unused.

    GET renders the form; POST creates the account and redirects home,
    flashing a message either way.
    """
    if request.method == 'POST':
        email = request.form.get('email')
        # .first() fetches at most one row instead of materializing every
        # match just to count them (the original used .all() + len()).
        existing = User.query.filter(User.email == email).first()
        if existing is None:
            # NOTE(review): the password is stored in plaintext — hash it
            # (e.g. werkzeug.security.generate_password_hash) before prod.
            user = User(email=email, password=request.form.get('password'))
            db.session.add(user)
            db.session.commit()
            flash('User successfully created')
        else:
            flash('User not created. Email associated with another user.')
        return redirect('/')
    return render_template('registration.html')
@app.route('/show_login')
def show_login():
    """Render the login form."""
    return render_template('login_form.html')
@app.route('/login', methods=['POST'])
def login():
    """Log in an existing user whose email and password both match.

    On success stores the user id in the session and redirects home;
    otherwise redirects to registration.
    """
    email = request.form.get('email')
    password = request.form.get('password')
    # .first() fetches at most one row (the original used .all() + len()).
    # NOTE(review): passwords are compared in plaintext — they should be
    # hashed before this goes to production.
    existing_user = User.query.filter(User.email == email,
                                      User.password == password).first()
    if existing_user is not None:
        session['user_id'] = existing_user.user_id
        flash('Logged in')
        return redirect('/')
    flash('User does not exist. Please create an account.')
    return redirect('/registration')
@app.route('/logout')
def logout():
    """Log the current user out by dropping their id from the session."""
    flash('You are logged out.')
    # pop() removes the key if present and is a no-op otherwise, replacing
    # the original get()-then-del two-step.
    session.pop('user_id', None)
    return redirect('/')
@app.route('/users/<int:user_id>')
def user_details(user_id):
    """Render the detail page for a single user."""
    return render_template("user_details.html", user=User.query.get(user_id))
@app.route('/movies')
def movie_list():
    """Render the movie list, alphabetized by title."""
    all_movies = Movie.query.order_by("title").all()
    return render_template('movie_list.html', movies=all_movies)
@app.route('/movies/<int:movie_id>')
def movie_details(movie_id):
    """Render details for one movie, plus the logged-in user's rating.

    The rating is None for anonymous visitors or when the user has not
    rated this movie yet.
    """
    movie = Movie.query.get(movie_id)

    rating = None
    if "user_id" in session:
        rating = Rating.query.filter_by(user_id=session['user_id'],
                                        movie_id=movie_id).first()

    return render_template("movie_details.html", movie=movie, rating=rating)
@app.route('/add_rating/<int:movie_id>', methods=['POST'])
def update_rating(movie_id):
    """Add a new rating, or update the logged-in user's existing one."""
    # Guard: an anonymous POST previously raised KeyError (an HTTP 500);
    # redirect to the login form instead.
    if 'user_id' not in session:
        flash('Please log in to rate movies.')
        return redirect('/show_login')

    user_id = session['user_id']
    score = request.form.get('score')

    rating = Rating.query.filter_by(user_id=user_id, movie_id=movie_id).first()
    if rating is None:
        db.session.add(Rating(score=score, movie_id=movie_id, user_id=user_id))
        flash('Your score has been added!')
    else:
        rating.score = score
        flash('Your score has been updated!')
    db.session.commit()

    return redirect('/movies')
if __name__ == "__main__":
    # We have to set debug=True here, since it has to be True at the
    # point that we invoke the DebugToolbarExtension
    app.debug = True

    # make sure templates, etc. are not cached in debug mode
    app.jinja_env.auto_reload = app.debug

    # Bind the SQLAlchemy models to this Flask app before serving.
    connect_to_db(app)

    # Use the DebugToolbar
    DebugToolbarExtension(app)

    # host='0.0.0.0' makes the dev server reachable from other machines.
    app.run(port=5000, host='0.0.0.0')
|
[
"you@example.com"
] |
you@example.com
|
736f785df9def8088dea0aae9dabe82b16a9740c
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flashblade/FB_2_10/models/file_system_clients_response.py
|
e1eec856f1c72463b8a3660b8bccb67ac5c2d070
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.10, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_10 import models
class FileSystemClientsResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'items': 'list[FileSystemClient]'
    }

    attribute_map = {
        'items': 'items'
    }

    # No constructor arguments are mandatory for this model.
    required_args = {
    }

    def __init__(
        self,
        items=None,  # type: List[models.FileSystemClient]
    ):
        """
        Keyword args:
            items (list[FileSystemClient]): A list of file system clients.
        """
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # Reject attribute names that are not part of the generated model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `FileSystemClientsResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Unset fields hold Property placeholders; present them as None.
        if isinstance(value, Property):
            return None
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models, lists of models, and dicts of
        # models; anything without a to_dict() is copied through unchanged.
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(FileSystemClientsResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FileSystemClientsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"azaman@purestorage.com"
] |
azaman@purestorage.com
|
bf3db0cec63be8c811c677ef82ada20aa6592901
|
55d560fe6678a3edc9232ef14de8fafd7b7ece12
|
/libs/python/test/data_members.py
|
37bef0d7048313adce6da1d258338609b84bedc1
|
[
"BSL-1.0"
] |
permissive
|
stardog-union/boost
|
ec3abeeef1b45389228df031bf25b470d3d123c5
|
caa4a540db892caa92e5346e0094c63dea51cbfb
|
refs/heads/stardog/develop
| 2021-06-25T02:15:10.697006
| 2020-11-17T19:50:35
| 2020-11-17T19:50:35
| 148,681,713
| 0
| 0
|
BSL-1.0
| 2020-11-17T19:50:36
| 2018-09-13T18:38:54
|
C++
|
UTF-8
|
Python
| false
| false
| 2,467
|
py
|
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from data_members_ext import *
---- Test static data members ---
>>> v = Var('slim shady')
>>> Var.ro2a.x
0
>>> Var.ro2b.x
0
>>> Var.rw2a.x
0
>>> Var.rw2b.x
0
>>> v.ro2a.x
0
>>> v.ro2b.x
0
>>> v.rw2a.x
0
>>> v.rw2b.x
0
>>> Var.rw2a.x = 777
>>> Var.ro2a.x
777
>>> Var.ro2b.x
777
>>> Var.rw2a.x
777
>>> Var.rw2b.x
777
>>> v.ro2a.x
777
>>> v.ro2b.x
777
>>> v.rw2a.x
777
>>> v.rw2b.x
777
>>> Var.rw2b = Y(888)
>>> y = Y(99)
>>> y.q = True
>>> y.q
True
>>> y.q = False
>>> y.q
False
>>> Var.ro2a.x
888
>>> Var.ro2b.x
888
>>> Var.rw2a.x
888
>>> Var.rw2b.x
888
>>> v.ro2a.x
888
>>> v.ro2b.x
888
>>> v.rw2a.x
888
>>> v.rw2b.x
888
>>> v.rw2b.x = 999
>>> Var.ro2a.x
999
>>> Var.ro2b.x
999
>>> Var.rw2a.x
999
>>> Var.rw2b.x
999
>>> v.ro2a.x
999
>>> v.ro2b.x
999
>>> v.rw2a.x
999
>>> v.rw2b.x
999
>>> Var.ro1a
0
>>> Var.ro1b
0
>>> Var.rw1a
0
>>> Var.rw1b
0
>>> v.ro1a
0
>>> v.ro1b
0
>>> v.rw1a
0
>>> v.rw1b
0
>>> Var.rw1a = 777
>>> Var.ro1a
777
>>> Var.ro1b
777
>>> Var.rw1a
777
>>> Var.rw1b
777
>>> v.ro1a
777
>>> v.ro1b
777
>>> v.rw1a
777
>>> v.rw1b
777
>>> Var.rw1b = 888
>>> Var.ro1a
888
>>> Var.ro1b
888
>>> Var.rw1a
888
>>> Var.rw1b
888
>>> v.ro1a
888
>>> v.ro1b
888
>>> v.rw1a
888
>>> v.rw1b
888
>>> v.rw1b = 999
>>> Var.ro1a
999
>>> Var.ro1b
999
>>> Var.rw1a
999
>>> Var.rw1b
999
>>> v.ro1a
999
>>> v.ro1b
999
>>> v.rw1a
999
>>> v.rw1b
999
-----------------
>>> x = X(42)
>>> x.x
42
>>> try: x.x = 77
... except AttributeError: pass
... else: print('no error')
>>> x.fair_value
42.0
>>> y = Y(69)
>>> y.x
69
>>> y.x = 77
>>> y.x
77
>>> v = Var("pi")
>>> v.value = 3.14
>>> v.name
'pi'
>>> v.name2
'pi'
>>> v.get_name1()
'pi'
>>> v.get_name2()
'pi'
>>> v.y.x
6
>>> v.y.x = -7
>>> v.y.x
-7
>>> v.name3
'pi'
'''
def run(args = None):
    """Run this module's doctests, optionally overriding sys.argv first.

    Returns the (failure_count, test_count) tuple from doctest.testmod.
    """
    import sys
    import doctest

    if args is not None:
        sys.argv = args
    return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
    print("running...")
    import sys
    # run() returns (failures, attempted); the exit status is the failure
    # count, so 0 signals a clean doctest run.
    status = run()[0]
    if (status == 0): print("Done.")
    sys.exit(status)
|
[
"james.pack@stardog.com"
] |
james.pack@stardog.com
|
c747a958e62fe8af848ebf95ee593021b8fc9fee
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/462/usersdata/308/105022/submittedfiles/avenida.py
|
d5a90601513ac9551b6535453d1dd15ef5a5326f
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
# -*- coding: utf-8 -*-
def inteiro(texto, min, max):
    """Prompt repeatedly until an integer in the range [min, max] is entered.

    Fixes the original loop condition (`min <= valor or valor >= max`),
    which re-prompted forever for any value >= min — including valid input.
    NOTE: the parameters shadow the builtins min()/max(); names kept for
    interface compatibility with existing callers.
    """
    valor = int(input(texto))
    # Re-ask only while the value is OUT of range.
    while valor < min or valor > max:
        valor = int(input(texto))
    return valor

m = inteiro('Informe a quantidade de quadras no sentido Norte-Sul: ', 2, 1000)
print(m)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
839694ce63e2b101bc8a70244513e7ecd986f067
|
df789505c99974c0ba45adc57e52fc7865ff2a28
|
/class_system/src/services/admin_service.py
|
c21e20a9d7b6c4e87f806b41d0643eea93644496
|
[] |
no_license
|
zhiwenwei/python
|
6fc231e47a9fbb555efa287ac121546e07b70f06
|
76d267e68f762ee9d7706e1800f160929544a0a3
|
refs/heads/master
| 2021-01-20T04:21:44.825752
| 2018-12-19T06:20:10
| 2018-12-19T06:20:10
| 89,676,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
#-*- coding:utf-8 -*-
# Author: Kevin
import sys,os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # add the project root to the import path
from models import School

def create_school():  # create a school record interactively
    """Prompt for a school name and address and persist a School object.

    NOTE(review): the duplicate check below is commented out, so the same
    (name, addr) pair can currently be created repeatedly — confirm whether
    that is intentional. The prompt strings are user-facing and
    intentionally left in Chinese.
    """
    # try:
    name = input("请输入学校名字")
    addr = input("请输入学校地址:")
    school_name_list = [(obj.name,obj.addr) for obj in School.get_all_obj_list()]
    # if (name,addr) in school_name_list:
    #     raise Exception('\033[43;1m[%s] [%s]校区 已经存在,不可重复创建\033[0m' % (name, addr))
    obj = School(name,addr)
    # print(school_name_list)
    obj.save()
    # status =True
    data = "[%s] [%s]校区创建成功"%(obj.name,obj.addr)
    print(data)
    # except Exception as e:
    #     status = False
    #     error =str(e)
    #     data = ''
    # return {'status': status, 'error': error, 'data': data}

# NOTE(review): called at import time — importing this module has side effects.
create_school()
|
[
"ddzhiwenwei@163.com"
] |
ddzhiwenwei@163.com
|
2206bfd7b874e66585e69e7e4f615ef67045f700
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4065/codes/1602_2894.py
|
28a0e854a1d6a6c2f3a6d82537d62a75f1b0641b
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# Reads the unit price of a game, computes the total for 8 copies plus a
# fixed 45.00 shipping fee, and prints it.

# Unit price, read from stdin (prompt is user-facing Portuguese).
var = float(input("Qual o valor unitario do jogo? "))

# 8 copies plus shipping; var is already float, so the sum is float.
total = var * 8 + 45

print(total)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
683053f40d2cf500cb405bf87ac2b8c2729e555a
|
d57b51ec207002e333b8655a8f5832ed143aa28c
|
/.history/gos_20200614062720.py
|
d6f85c522d6e82fc164a1c2ba47e9fea286c6ff5
|
[] |
no_license
|
yevheniir/python_course_2020
|
b42766c4278a08b8b79fec77e036a1b987accf51
|
a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b
|
refs/heads/master
| 2022-11-15T07:13:24.193173
| 2020-07-11T15:43:26
| 2020-07-11T15:43:26
| 278,890,802
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,421
|
py
|
# # Імпорт фажливих бібліотек
# from BeautifulSoup import BeautifulSoup
# import urllib2
# import re
# # Створення функції пошуку силок
# def getLinks(url):
# # отримання та присвоєння контенту сторінки в змінну
# html_page = urllib2.urlopen(url)
# # Перетворення контенту в обєкт бібліотеки BeautifulSoup
# soup = BeautifulSoup(html_page)
# # створення пустого масиву для лінків
# links = []
# # ЗА ДОПОМОГОЮ ЧИКЛУ ПРОХЛДИМСЯ ПО ВСІХ ЕЛЕМЕНТАХ ДЕ Є СИЛКА
# for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
# # Додаємо всі силки в список
# links.append(link.get('href'))
# # повертаємо список
# return links
# -----------------------------------------------------------------------------------------------------------
# # # Імпорт фажливих бібліотек
# import subprocess
# # Створення циклу та використання функції range для генерації послідовних чисел
# for ping in range(1,10):
# # генерування IP адреси базуючись на номері ітерації
# address = "127.0.0." + str(ping)
# # виклик функції call яка робить запит на IP адрес та запис відповіді в змінну
# res = subprocess.call(['ping', '-c', '3', address])
# # За допомогою умовних операторів перевіряємо відповідь та виводимо результат
# if res == 0:
# print "ping to", address, "OK"
# elif res == 2:
# print "no response from", address
# else:
# print "ping to", address, "failed!"
# -----------------------------------------------------------------------------------------------------------
# # Імпорт фажливих бібліотек
# import requests
# # Ітеруємося по масиву з адресами зображень
# for i, pic_url in enumerate(["http://x.com/nanachi.jpg", "http://x.com/nezuko.jpg"]):
# # Відкриваємо файл базуючись на номері ітерації
# with open('pic{0}.jpg'.format(i), 'wb') as handle:
# # Отримуємо картинку
# response = requests.get(pic_url, stream=True)
# # Використовуючи умовний оператор перевіряємо чи успішно виконався запит
# if not response.ok:
# print(response)
# # Ітеруємося по байтах картинки та записуємо батчаси в 1024 до файлу
# for block in response.iter_content(1024):
# # Якщо байти закінчилися, завершуємо алгоритм
# if not block:
# break
# # Записуємо байти в файл
# handle.write(block)
# -----------------------------------------------------------------------------------------------------------
# # Створюємо клас для рахунку
# class Bank_Account:
# # В конструкторі ініціалізуємо рахунок як 0
# def __init__(self):
# self.balance=0
# print("Hello!!! Welcome to the Deposit & Withdrawal Machine")
# # В методі депозит, використовуючи функцію input() просимо ввести суму поповенння та додаємо цю суму до рахунку
# def deposit(self):
# amount=float(input("Enter amount to be Deposited: "))
# self.balance += amount
# print("\n Amount Deposited:",amount)
# # В методі депозит, використовуючи функцію input() просимо ввести суму отримання та віднімаємо цю суму від рахунку
# def withdraw(self):
# amount = float(input("Enter amount to be Withdrawn: "))
# # За допомогою умовного оператора перевіряємо чи достатнього грошей на рахунку
# if self.balance>=amount:
# self.balance-=amount
# print("\n You Withdrew:", amount)
# else:
# print("\n Insufficient balance ")
# # Виводимо бааланс на екран
# def display(self):
# print("\n Net Available Balance=",self.balance)
# # Створюємо рахунок
# s = Bank_Account()
# # Проводимо операції з рахунком
# s.deposit()
# s.withdraw()
# s.display()
# -----------------------------------------------------------------------------------------------------------
# # Створюємо рекурсивну функцію яка приймає десяткове число
# def decimalToBinary(n):
# # перевіряємо чи число юільше 1
# if(n > 1):
# # Якщо так, ділемо на 2 юез остачі та рекурсивно викликаємо функцію
# decimalToBinary(n//2)
# # Якщо ні, виводимо на остачу ділення числа на 2
# print(n%2, end=' ')
# # Створюємо функцію яка приймає бінарне число
# def binaryToDecimal(binary):
# # Створюємо додаткову змінну
# binary1 = binary
# # Ініціалізуємо ще 3 змінню даючи їм значення 0
# decimal, i, n = 0, 0, 0
# # Ітеруємося до тих пір поки передане нами число не буде 0
# while(binary != 0):
# # Отримуємо остачу від ділення нашого чила на 10 на записуємо в змінну
# dec = binary % 10
# # Додаємо до результату суму попереднього результату та добуток від dec та піднесення 2 до степеня номеру ітерації
# decimal = decimal + dec * pow(2, i)
# # Змінюємо binary
# binary = binary//10
# # Додаємо 1 до кількості ітерацій
# i += 1
# # Виводимо результат
# print(decimal)
# -----------------------------------------------------------------------------------------------------------
# # Імпорт фажливих бібліотек
# import re
# # В умовному операторі перевіряємо чи підходить введена пошта під знайдений з інтернету regex
# if re.match(r"[^@]+@[^@]+\.[^@]+", "nanachi@gmail.com"):
# # Якщо так, виводиму valid
# print("valid")
# -----------------------------------------------------------------------------------------------------------
# # Створення функції яка приймає текст для шифрування та здвиг
# def encrypt(text,s):
# # Створення змінної для результату
# result = ""
# # Ітеруємося по тексту використовуючи range та довжину тексту
# for i in range(len(text)):
# # Беремо літеру базуючись на номері ітерації
# char = text[i]
# # Перевіряємо чи ця літера велика
# if (char.isupper()):
# # Кодуємо літеру базуючись на її номері
# result += chr((ord(char) + s-65) % 26 + 65)
# else:
# # Кодуємо літеру базуючись на її номері
# result += chr((ord(char) + s - 97) % 26 + 97)
# # Повертаємо результат
# return result
# -----------------------------------------------------------------------------------------------------------
numbers = ["050234234", "050234234", "099234234"]
|
[
"yevheniira@intelink-ua.com"
] |
yevheniira@intelink-ua.com
|
b13da817aede04b68ad39c188fb32a758e46b488
|
490957cf9130f1596c9f81bacff90b13f25eb2e6
|
/Problems/Even numbers/task.py
|
9cb7f6f386b84458325d9faeb5412c7818ca756b
|
[] |
no_license
|
TonyNewbie/PaswordHacker
|
6eb021e3660aba94d020a7b581dc2787b57556c0
|
ac70d64cba58e83e88c00fb2f9c4fcc552efcc35
|
refs/heads/master
| 2022-11-19T03:29:53.300586
| 2020-07-13T10:37:34
| 2020-07-13T10:37:34
| 279,272,910
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
n = int(input())
def even():
    """Yield the even numbers 0, 2, 4, ... indefinitely."""
    current = 0
    while True:
        yield current
        current += 2
# Print the first n even numbers, one per line, by advancing the generator.
new_generator = even()
for _ in range(n):
    print(next(new_generator))
|
[
"prostomaster90@gmail.com"
] |
prostomaster90@gmail.com
|
5178c6bc234c586a65edf654fd074b59e5be7adb
|
40c677f1e39ba53063ced109f4bf23d16162a899
|
/orders/views.py
|
b01d47e358988cc750df02d17479979112a55445
|
[] |
no_license
|
AminMohamedAmin/Online-Restaurant-System-
|
ee25b5d7ff7e52dc6b2ac632f0dd58e38022f6bb
|
b9aa2d8b8d69ab56437d4b4d039fc935b0b85227
|
refs/heads/master
| 2022-08-24T21:24:30.224785
| 2020-05-26T11:49:34
| 2020-05-26T11:49:34
| 267,028,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
from django.shortcuts import render, redirect,get_object_or_404
from django.urls import reverse
from .forms import OrderCreateForm
from .models import OrderItem, order
from cart.cart import Cart
############### pdf ####################
from django.contrib.admin.views.decorators import staff_member_required
from django.conf import settings
from django.http import HttpResponse
from django.template.loader import render_to_string
import weasyprint
#########################################
def order_create(request):
    """Create an order from the session cart and render a confirmation.

    POST with a valid form persists the order (applying any cart coupon),
    copies each cart line into an OrderItem, empties the cart, and renders
    the confirmation page. GET — or an invalid POST — renders the order
    form alongside the cart contents.
    """
    cart = Cart(request)
    if request.method == 'POST':
        form = OrderCreateForm(request.POST)
        if form.is_valid():
            # Renamed from `order` to stop shadowing the model imported as
            # `order` from .models.
            new_order = form.save(commit=False)
            if cart.coupon:
                new_order.coupon = cart.coupon
                new_order.discount = cart.coupon.discount
            new_order.save()
            for item in cart:
                OrderItem.objects.create(
                    order=new_order,
                    product=item['product'],
                    price=item['price'],
                    quantity=item['quantity'])
            # The order is saved; drop the cart from the session.
            cart.clear()
            context = {
                'order': new_order,
            }
            return render(request, 'order/created.html', context)
    else:
        form = OrderCreateForm()
    context = {
        'cart': cart,
        'form': form,
    }
    return render(request, 'order/create.html', context)
####################### pdf #######################
@staff_member_required
def admin_order_pdf(request,order_id):
    """Render a single order as an inline PDF (staff only).

    Renders the order/pdf.html template to HTML, then converts it with
    WeasyPrint directly into the HTTP response.
    """
    Order = get_object_or_404(order,id=order_id)
    html = render_to_string('order/pdf.html',{'order':Order})
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'filename="order_{}.pdf"'.format(Order.id)
    # NOTE(review): the string concatenation assumes STATIC_ROOT ends with a
    # path separator — os.path.join would be safer; confirm the settings.
    weasyprint.HTML(string=html).write_pdf(response,stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + 'css/pdf.css')])
    return response
#######################################################
|
[
"ameenman50@gmail.com"
] |
ameenman50@gmail.com
|
6e13b2cc1879d6fcbf5967e111777d18af637fa9
|
8a73cde463081afd76427d5af1e6837bfa51cc47
|
/harvester/metadata/management/commands/compare_study_vocabularies.py
|
65b82b39e3f1630c29dd6a3827f8bc7c7eecb52d
|
[
"MIT"
] |
permissive
|
surfedushare/search-portal
|
8af4103ec6464e255c5462c672b30f32cd70b4e1
|
63e30ad0399c193fcb686804062cedf3930a093c
|
refs/heads/acceptance
| 2023-06-25T13:19:41.051801
| 2023-06-06T13:37:01
| 2023-06-06T13:37:01
| 254,373,874
| 2
| 1
|
MIT
| 2023-06-06T12:04:44
| 2020-04-09T13:07:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,940
|
py
|
import requests
import re
from django.core.management.base import BaseCommand
from metadata.models import MetadataValue
uuid4hex = re.compile(r'(?P<uuid>[0-9a-f]{8}\-[0-9a-f]{4}\-4[0-9a-f]{3}\-[89ab][0-9a-f]{3}\-[0-9a-f]{12})', re.I)
class Command(BaseCommand):
@staticmethod
def _get_node_label(node):
return node.get("skos:prefLabel", node.get("dcterms:title", {}))["@value"]
@staticmethod
def _get_node_id(node):
identifier_match = uuid4hex.search(node["@id"])
return identifier_match.group(0)
def _analyze_vocabulary_graph(self, vocabulary_path, graph):
table = {}
missing = set()
found = set()
for node in graph:
identifier = self._get_node_id(node)
table[identifier] = node
mptt_node = MetadataValue.objects.filter(value=identifier).last()
if mptt_node:
found.add(identifier)
continue
mptt_node = MetadataValue.objects.filter(translation__nl=self._get_node_label(node))
if mptt_node:
found.add(identifier)
else:
missing.add(identifier)
print("Graph analyze:", vocabulary_path)
print("found", len(found))
print("missing", len(missing))
print("*"*80)
def _substract_vocabulary_metadata(self, graph, ideas, studies):
for node in graph:
identifier = self._get_node_id(node)
label = self._get_node_label(node)
ideas.pop(identifier, None)
ideas.pop(label, None)
studies.pop(identifier, None)
studies.pop(label, None)
def handle(self, **options):
ideas = {
value.value: value
for value in MetadataValue.objects.filter(field__name="ideas.keyword")
}
studies = {
value.value: value
for value in MetadataValue.objects.filter(field__name="studies")
}
vocabularies = [
"verpleegkunde/verpleegkunde-2019.skos.json",
"informatievaardigheid/informatievaardigheid-2020.skos.json",
"vaktherapie/vaktherapie-2020.skos.json"
]
for vocabulary_path in vocabularies:
vocabulary_response = requests.get(f"https://vocabulaires.edurep.nl/type/vak/{vocabulary_path}")
vocabulary = vocabulary_response.json()
self._analyze_vocabulary_graph(vocabulary_path, vocabulary["@graph"])
self._substract_vocabulary_metadata(vocabulary["@graph"], ideas, studies)
print("Metadata analyze")
print(
"orphan ideas percentage",
int(len(ideas) / MetadataValue.objects.filter(field__name="ideas.keyword").count() * 100)
)
print(
"orphan studies percentage",
int(len(studies) / MetadataValue.objects.filter(field__name="studies").count() * 100)
)
|
[
"email@fakoberkers.nl"
] |
email@fakoberkers.nl
|
81986ebbff0325c513016a51c2583cc663f4f483
|
03d4f548b0f03d723c776a913c0814508052fbd4
|
/src/tsgettoolbox/ulmo/util/__init__.py
|
2b22dc7d4c5466883b30a8cf364eede652549a80
|
[
"BSD-3-Clause"
] |
permissive
|
timcera/tsgettoolbox
|
2cee41cf79fd2a960d66066df5335bb1816f8003
|
1ca7e8c224a8f7c969aff1bbb22f13930cb8f8b0
|
refs/heads/main
| 2023-09-06T03:22:17.785382
| 2023-07-27T04:06:22
| 2023-07-27T04:06:22
| 40,149,564
| 14
| 4
|
BSD-3-Clause
| 2022-09-16T23:00:40
| 2015-08-03T21:47:57
|
Python
|
UTF-8
|
Python
| false
| false
| 940
|
py
|
from .misc import (
camel_to_underscore,
convert_date,
convert_datetime,
dict_from_dataframe,
dir_list,
download_if_new,
get_ulmo_dir,
mkdir_if_doesnt_exist,
module_with_dependency_errors,
module_with_deprecation_warnings,
open_file_for_url,
parse_fwf,
raise_dependency_error,
save_pretty_printed_xml,
to_bytes,
)
from .raster import (
download_tiles,
extract_from_zip,
generate_raster_uid,
mosaic_and_clip,
)
# The PyTables-backed helpers are optional.  When the dependency is not
# installed, expose stand-ins that raise a descriptive dependency error
# only when one of these names is actually called.
try:
    from .pytables import (
        get_default_h5file_path,
        get_or_create_group,
        get_or_create_table,
        open_h5file,
        update_or_append_sortable,
    )
except ImportError:
    get_default_h5file_path = raise_dependency_error
    get_or_create_group = raise_dependency_error
    get_or_create_table = raise_dependency_error
    open_h5file = raise_dependency_error
    update_or_append_sortable = raise_dependency_error
|
[
"tim@cerazone.net"
] |
tim@cerazone.net
|
489041c27386827df9ebe9a86ebd99213371c75d
|
5b5a49643c75aa43d5a876608383bc825ae1e147
|
/python99/misc/p702.py
|
8888db3ce4a8bc0a289bf66437324404ec628a4c
|
[] |
no_license
|
rscai/python99
|
281d00473c0dc977f58ba7511c5bcb6f38275771
|
3fa0cb7683ec8223259410fb6ea2967e3d0e6f61
|
refs/heads/master
| 2020-04-12T09:08:49.500799
| 2019-10-06T07:47:17
| 2019-10-06T07:47:17
| 162,393,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
def knight_tour(n):
    """Return every open knight's tour of an n x n board starting at (1, 1).

    Each tour is a list of 1-based (row, col) squares visiting every square
    exactly once.  Exhaustive search: practical only for tiny boards.
    """
    return [[(1, 1)] + tail for tail in doTour(n, n * n - 1, (1, 1), [(1, 1)])]


def doTour(n, m, start, path):
    """Return all continuations of *path* from *start* using m more moves."""
    if m == 0:
        return [[]]
    tours = []
    for move in getAvailableMoves(n, path, start):
        nxt = moveTo(start, move)
        for tail in doTour(n, m - 1, nxt, path + [nxt]):
            tours.append([nxt] + tail)
    return tours


def moveTo(start, move):
    """Return the square reached from *start* by the (drow, dcol) *move*."""
    return (start[0] + move[0], start[1] + move[1])


def getAvailableMoves(n, path, start):
    """Yield legal knight moves from *start* that stay on the board and
    avoid every square already in *path*."""
    deltas = (
        (2, 1),
        (1, 2),
        (-1, 2),
        (-2, 1),
        (-2, -1),
        (-1, -2),
        (1, -2),
        (2, -1),
    )
    for delta in deltas:
        row, col = moveTo(start, delta)
        if 1 <= row <= n and 1 <= col <= n and (row, col) not in path:
            yield delta
|
[
"ray.s.cai@icloud.com"
] |
ray.s.cai@icloud.com
|
adf28e920deddf72529dcb0823b1473ab4f87eba
|
ae9ce341ffb6b6d0587b04af81d8a25d81adc987
|
/src/core/migrations/0001_initial.py
|
96305126d9e1be6c432a17f0620d4a7bf2e73231
|
[] |
no_license
|
MrTsepa/track_web
|
7eda8e0cdcb2c384b57569b59f03a7d4ad0c4543
|
276860bdeb42a2b27002e1e19eca0383ffb27b0e
|
refs/heads/master
| 2021-01-12T17:53:15.769984
| 2016-12-27T17:44:15
| 2016-12-27T17:44:15
| 71,288,968
| 0
| 0
| null | 2016-12-25T19:12:27
| 2016-10-18T20:34:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,021
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-18 16:09
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates a custom ``User`` model mirroring Django's
    ``AbstractUser`` fields plus an optional ``avatar`` image.

    Auto-generated by Django 1.10.1 -- edit with care; the field list must
    stay in sync with the model definition.
    """

    initial = True

    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # b'avatars' (bytes path) is a Python-2-era artifact of the generator.
                ('avatar', models.ImageField(blank=True, null=True, upload_to=b'avatars')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
|
[
"tsepa.stas@gmail.com"
] |
tsepa.stas@gmail.com
|
4606275fa5d9e722d6644f7d7cf1c37e42c82127
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_230/ch168_2020_06_15_19_49_26_764111.py
|
6d30c2c492a8b42526eb6299bf967924d030cb9f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
def login_disponivel(login, lista):
    """Return *login* if it is free in *lista*, else the login plus the
    smallest positive integer suffix that makes it unique.

    Side effect (as in the original): when *login* itself is free it is
    appended to *lista*; suggested alternatives are not appended.

    Fix: the original built candidates with ``str.replace`` on the last
    character, which replaced *every* occurrence of that digit and produced
    corrupted names once the counter passed 9 (e.g. 'user11' -> 'user22').
    Searching numeric suffixes directly is correct for all counts.
    """
    if login not in lista:
        lista.append(login)
        return login
    sufixo = 1
    while login + str(sufixo) in lista:
        sufixo += 1
    return login + str(sufixo)
|
[
"you@example.com"
] |
you@example.com
|
fb2a17ee074aee2dd601440a013c1d40a2e94c24
|
2b54b1fb1540ab73d6c83cae3acd5fdd58bdead5
|
/Platinum_clusters_Project/Pt7O11_richness/Ptoxides_zorderimage_new.py
|
5c29ede674b6b65b4573cae10e0835fa87be76a9
|
[] |
no_license
|
sivachiriki/GOFEE_Pt_V_supported
|
5787d44294262870075f35f2d31c096021b7ce20
|
6bd700dac1f3e7c58394b758d75246ac6e07eade
|
refs/heads/master
| 2022-04-08T11:38:13.038455
| 2020-03-09T10:48:31
| 2020-03-09T10:48:31
| 226,359,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,547
|
py
|
from __future__ import division
import matplotlib
#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'
#from plot_neb_tio2 import *
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import matplotlib.patches as patches
from math import ceil, floor
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
import matplotlib.patches as mpatches
from ase.data.colors import jmol_colors
from decimal import Decimal
from pylab import *
from ase.data import covalent_radii as aradii
from matplotlib.patches import Circle
from math import atan2,pi
import matplotlib.gridspec as gridspec
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
def plot_atoms(ax, atoms, xyz, acols, alp, z):
    """Draw every atom of *atoms* as a filled circle on matplotlib axes *ax*.

    ax    -- matplotlib Axes to draw on.
    atoms -- ASE Atoms collection to render.
    xyz   -- indices of the position components used as plot (x, y).
    acols -- per-atom face colors.
    alp   -- per-atom alpha values (None = opaque).
    z     -- unused; kept only for call compatibility.  # NOTE(review)
    """
    ecols = [[0, 0, 0] for col in atoms]  # black edge for every atom
    indices = range(len(atoms))
    for ia in indices:
        acol = acols[ia]
        ecol = ecols[ia]
        arad = aradii[atoms[ia].number]  # covalent radius sets circle size
        apos = atoms[ia].position
        eps = arad  # NOTE(review): unused local
        circ = Circle([apos[xyz[0]], apos[xyz[1]]],
                      fc = acol,
                      ec = ecol,
                      radius = arad,
                      lw = 0.5,
                      alpha = alp[ia],
                      # depth-sort by the y coordinate so nearer atoms
                      # (smaller y) are drawn on top
                      zorder = 1 - apos[1]/1000
                      )
        ax.add_patch(circ)
def plot_conf(ax, atoms, colorlenth, rot=False):
    """Color-code and draw one configuration on *ax*.

    Pt (Z=78) -> teal, C (Z=6) -> black, O (Z=8) above z=12.2 -> purple;
    support atoms (Al/O below z=9.7) are drawn semi-transparent.
    *colorlenth* is unused here.  When *rot* is set the atoms object is
    rotated in place (mutates the caller's copy) to give a top view.
    """
    colors = np.array([jmol_colors[atom.number] for atom in atoms])
    positions =atoms.get_positions()
    for i, atom in enumerate(atoms):
        if (atom.number ==78):
            colors[i] =[0.1, 0.6, 0.6]
        if (atom.number ==6):
            colors[i] =[0.0, 0.0, 0.0]
        # NOTE(review): 12.2 presumably separates cluster oxygens from the
        # support -- confirm against the slab geometry.
        if (atom.number ==8 and positions[i,2]>12.2):
            colors[i] =[128/255, 0/255, 128/255]
    alp = [None] * colors.shape[0]
    for i,a in enumerate(atoms):
        if a.symbol == 'Al' or a.symbol == 'O':
            if a.position[2] < 9.7:
                alp[i] = 0.3  # fade deep support atoms
    if rot:
        atoms.rotate('x',pi/2)
    plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
def plot_conf1(ax, atoms, colorlenth, rot=False):
    """Variant of plot_conf used for one special structure (j == 4).

    Same color scheme except C (Z=6) is blue, and every atom below
    z=12.7 is painted white -- effectively hiding the support so only the
    cluster shows.  *colorlenth* is unused; *rot* rotates in place.
    """
    colors = np.array([jmol_colors[atom.number] for atom in atoms])
    positions =atoms.get_positions()
    for i, atom in enumerate(atoms):
        if (atom.number ==78):
            colors[i] =[0.1, 0.6, 0.6]
        if (atom.number ==6):
            colors[i] =[0.1, 0.2, 0.9]
        if (atom.number ==8 and positions[i,2]>12.2):
            colors[i] =[128/255, 0/255, 128/255]
        # white-out everything below the cluster region
        if (positions[i,2]<12.7 ):
            colors[i] =[255/255, 255/255, 255/255]
    alp = [None] * colors.shape[0]
    for i,a in enumerate(atoms):
        if a.symbol == 'Al' or a.symbol == 'O':
            if a.position[2] < 9.7:
                alp[i] = 0.3
    if rot:
        atoms.rotate('x',pi/2)
    plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
#-----------------------------------------------------------#
# Build a grid of panels: for each structure in the input trajectory,
# draw a side view (top panel) and a top view (bottom panel) with the
# energy difference to the global minimum annotated underneath.
# Usage: script <trajectory-file> <output-image>
fig = plt.figure(figsize=(13.0,10.5))
outer = gridspec.GridSpec(4, 9, wspace=0.04, hspace=0.2)
color_lib = ['#00FF00','#377eb8','#4daf4a','#00FFFF','#a65628','#FF0000','#0000FF', '#FF00FF','#FFFF00','#000000']
#---------------------- Pt7 clusters -------------------------------------#
data=read(sys.argv[1]+'@:')
# Energy of each structure relative to the first (assumed global minimum).
energydif =np.zeros(len(data))
for j in range(len(data)):
    GM_energy = data[0].get_potential_energy()
    energydif[j] = (data[j].get_potential_energy() - GM_energy)
for j in range(0,len(data)):
    inner = gridspec.GridSpecFromSubplotSpec(2, 1,subplot_spec=outer[j], wspace=0.00, hspace=0.0, height_ratios=[6.86,9.9])
    atoms = data[j]
    colorlenth = len(atoms)
    # Tile the periodic cell 3x3 so the cluster is not cut at the boundary.
    atoms =atoms*(3,3,1)
    print(colorlenth)
   # write('newimage.traj',atoms)
    a=atoms
    # Keep only a small index window of the tiled system to locate the
    # cluster's center of mass.  NOTE(review): the offsets (*5-19, *5)
    # appear tuned to this particular cell -- confirm before reuse.
    del atoms[[atom.index for atom in atoms if atom.index <=colorlenth*5-19 or atom.index >=colorlenth*5]]
    #view(atoms)
    centreofmass = a.get_center_of_mass()
    # Re-tile from scratch and crop a box around the center of mass.
    atoms = data[j]*(3,3,1)
    a=atoms
    del atoms[atoms.positions[:,0] >=centreofmass[0]+8.10]
    del atoms[atoms.positions[:,0] <= centreofmass[0]-8.10]
    del atoms[atoms.positions[:,1] >= centreofmass[1]+7.8]
    del atoms[atoms.positions[:,1] <= centreofmass[1]-7.10]
    colorlenth = len(atoms)
    #view(atoms)
    cell = atoms.get_cell()
   # 0 0  -- side view panel
    ax = plt.Subplot(fig, inner[0])
    img = atoms.copy()
    # Structure index 4 uses the white-out variant (support hidden).
    if (j!=4):
       plot_conf(ax, img,colorlenth)
    if (j==4):
       plot_conf1(ax, img,colorlenth)
    ax.set_xlim([centreofmass[0]-7.50, centreofmass[0]+7.50])
    ax.set_ylim([10.7, 20.0])
    ax.set_yticks([])
    ax.set_xticks([])
    ax.set(aspect=1)
    fig.add_subplot(ax)
    #----------------- drawing box -------------------------------#
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    #print(xlim)
    #print(ylim)
    box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
    box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
    # White backing rectangle (zorder 0.8) sits behind the atoms but hides
    # anything spilling over from neighbouring panels.
    ax.add_patch(
         patches.Rectangle(
            (box_x[0],box_y[0]),
            xlim[1]-xlim[0],
            ylim[1]-ylim[0],
         fill=True,facecolor='white', clip_on=False,zorder =0.8) )
    ax.plot(box_x, box_y, color='blue',linewidth=5.0)
    # 0 1  -- top view panel (rot=True)
    ax = plt.Subplot(fig, inner[1])
    cell = atoms.get_cell()
    img = atoms.copy()
    if (j!=4):
       plot_conf(ax, img,colorlenth, rot=True)
    if (j==4):
       plot_conf1(ax, img,colorlenth, rot=True)
    ax.set_xlim([centreofmass[0]-7.5, centreofmass[0]+7.50])
    ax.set_ylim([centreofmass[1]-6.5, centreofmass[1]+7.0])
    name ='$\Delta E = {:3.3f}$ eV'.format(energydif[j])
    ax.text(0.05, -0.14, name, transform=ax.transAxes,fontsize=10)
    name1 = "S$_{"+ str(j+1) + "}$"
    ax.text(0.05, 1.6, name1, transform=ax.transAxes,fontsize=10)
    ax.set_yticks([])
    ax.set_xticks([])
    ax.set(aspect=1)
    #----------------- drawing box -------------------------------#
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    #print(xlim)
    #print(ylim)
    box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
    box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
    ax.add_patch(
         patches.Rectangle(
            (box_x[0],box_y[0]),
            xlim[1]-xlim[0],
            ylim[1]-ylim[0],
         fill=True,facecolor='white', clip_on=False,zorder =0.8) )
    ax.plot(box_x, box_y, color='blue',linewidth=5.0)
    fig.add_subplot(ax)
fig.text(0.4, 0.89, 'Lowest Isomers of Pt$_7$O$_{11}$', ha='center',fontsize=14)
name = sys.argv[2]
name =name  # NOTE(review): no-op assignment
savefig(name,bbox_inches='tight')
show()
exit()
|
[
"sivachiriki@phys.au.dk"
] |
sivachiriki@phys.au.dk
|
19fe4733092470c04d9b22d2264b885c70a14290
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/QcswPnY2cAbrfwuWE_24.py
|
87292310bdc04e8e32529844946ccbcd1e95cb45
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
"""
Create a function that filters out factorials from a list. A factorial is a
number that can be represented in the following manner:
n! = n * (n-1) * (n-2) * ... * 3 * 2 * 1
Recursively, this can be represented as:
n! = n * (n-1)!
### Examples
filter_factorials([1, 2, 3, 4, 5, 6, 7]) ➞ [1, 2, 6]
filter_factorials([1, 4, 120]) ➞ [1, 120]
filter_factorials([8, 9, 10]) ➞ []
### Notes
N/A
"""
def factorial(x):
    """Return x! computed recursively (factorial(0) == 1)."""
    return 1 if not x else x * factorial(x - 1)


def filter_factorials(n):
    """Return the elements of *n* that are factorial numbers, preserving order.

    Fixes two defects of the original: ``range(1, max(n))`` excluded the
    boundary, so inputs like [1] or [2] wrongly returned []; and membership
    was tested against a list with recursively recomputed factorials.  Here
    factorials are generated iteratively up to and including max(n) and kept
    in a set for O(1) lookup.  An empty input returns [] instead of raising.
    """
    if not n:
        return []
    limit = max(n)
    facts = set()
    f = k = 1
    while f <= limit:  # 1!, 2!, 3!, ... up to and including limit
        facts.add(f)
        k += 1
        f *= k
    return [e for e in n if e in facts]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
40282fc5a8d13a3550a7977c79d53dc897d2564a
|
3a17b31ed9250b38de3b9fd9db8d3d3a8719222c
|
/setup.py
|
1b13c01094f9aa2e0ecd2d15e8c084c887a0422e
|
[
"MIT"
] |
permissive
|
a627414850/Macropodus
|
4cc9bb48408b832cdc890a098a7ea8dc64328ba1
|
1d7b8f9938cb8b6d7744e9caabc3eb41c8891283
|
refs/heads/master
| 2023-02-15T09:04:35.889058
| 2020-12-25T14:29:04
| 2020-12-25T14:29:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,566
|
py
|
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/12/30 22:17
# @author :Mo
# @function :setup of Macropodus
# @codes :fix it and copy reference from https://github.com/TianWenQAQ/Kashgari/blob/master/setup.py
from macropodus.version import __version__
from setuptools import find_packages, setup
import codecs
# Package meta-data.
NAME = 'Macropodus'
# NOTE(review): 'Tookit' is a typo for 'Toolkit' in the published metadata;
# left unchanged here because it is the released description string.
DESCRIPTION = 'Macropodus: Tookit of Chinese Natural Language Processing'
URL = 'https://github.com/yongzhuo/Macropodus'
EMAIL = '1903865025@qq.com'
AUTHOR = 'yongzhuo'
LICENSE = 'MIT'
# Long description comes verbatim from the README; dependencies from
# requirements.txt (one requirement per line, whitespace stripped).
with codecs.open('README.md', 'r', 'utf8') as reader:
    long_description = "\n".join(reader.readlines())
with codecs.open('requirements.txt', 'r', 'utf8') as reader:
    install_requires = list(map(lambda x: x.strip(), reader.readlines()))
setup(name=NAME,
      version=__version__,
      description=DESCRIPTION,
      long_description=long_description,
      long_description_content_type="text/markdown",
      author=AUTHOR,
      author_email=EMAIL,
      url=URL,
      packages=find_packages(),  # (exclude=('test')),
      # Ship bundled dictionaries, embeddings and models with the package.
      package_data={'macropodus': ['*.*', 'data/*', 'data/dict/*',
                                   'data/embedding/*', 'data/embedding/word2vec/*',
                                   'data/model/*']
                    },
      install_requires=install_requires,
      license=LICENSE,
      classifiers=['License :: OSI Approved :: MIT License',
                   'Programming Language :: Python :: 3.5',
                   'Programming Language :: Python :: 3.6',
                   'Programming Language :: Python :: 3.7',
                   'Programming Language :: Python :: 3.8',
                   'Programming Language :: Python :: 3.9',
                   'Programming Language :: Python :: Implementation :: CPython',
                   'Programming Language :: Python :: Implementation :: PyPy'],
      )
if __name__ == "__main__":
    print("setup ok!")
# Note: requires tensorflow>=1.13.0 or tensorflow-gpu>=1.13.0
# Project layout: the repository directory is Macropodus and contains another
# macropodus package inside it, i.e. macropodus sits next to setup.py.
# The data package must contain an __init__.py, otherwise its files are not
# generated into the distribution; only .py files get copied.
# Creating an environment with anaconda3:
# conda remove -n py35 --all
# conda create -n py351 python=3.5
# Two ways to build:
# Option 1
# open a terminal (cmd)
# cd to the project directory
# python setup.py build
# python setup.py install
# Option 2
# python setup.py bdist_wheel --universal
# twine upload dist/*
|
[
"2714618994@qq.com"
] |
2714618994@qq.com
|
ab7557f54c78b00a84b9184bb4bae7e516208f59
|
c0156da1c81a3a76e397974399c7345d082eca9b
|
/venv/lib/python3.7/site-packages/webdav/common.py
|
5ba8b2c9e55df9fcb895045c8a1ca7c86de54bb2
|
[
"Apache-2.0"
] |
permissive
|
leanhvu86/matrix-server
|
1823c60fc6ba5ed489bb5720474c6b56a9aec688
|
6e16fc53dfebaeaf222ff5a371ccffcc65de3818
|
refs/heads/master
| 2023-05-09T01:21:37.774510
| 2021-05-21T15:10:48
| 2021-05-21T15:10:48
| 369,569,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,095
|
py
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Commonly used functions for WebDAV support modules."""
import re
from six.moves.urllib.parse import urlparse
from six.moves.urllib.parse import urlunparse
from Acquisition import aq_base
from Acquisition import aq_parent
from zExceptions import HTTPConflict
from zExceptions import HTTPLocked
from zExceptions import HTTPPreconditionFailed
from zExceptions import HTTPUnsupportedMediaType
# WebDAV error hierarchy: each concrete error derives from both the package
# root WebDAVException and the matching zExceptions HTTP error, so callers
# can catch either the WebDAV family or the HTTP status family.
class WebDAVException(Exception):
    pass


class Locked(WebDAVException, HTTPLocked):
    # HTTP 423
    pass


class PreconditionFailed(WebDAVException, HTTPPreconditionFailed):
    # HTTP 412
    pass


class Conflict(WebDAVException, HTTPConflict):
    # HTTP 409
    pass


class UnsupportedMediaType(WebDAVException, HTTPUnsupportedMediaType):
    # HTTP 415
    pass
def absattr(attr):
    """Return attr() when *attr* is callable, otherwise *attr* unchanged."""
    return attr() if callable(attr) else attr
def urljoin(url, s):
    """Join *url* and *s* with exactly one '/' between them."""
    return '{}/{}'.format(url.rstrip('/'), s.lstrip('/'))
def urlfix(url, s):
    """Strip a trailing suffix *s* from *url*, then any trailing slash
    (the root '/' alone is preserved)."""
    if s and url.endswith(s):
        url = url[:-len(s)]
    if len(url) > 1 and url.endswith('/'):
        url = url[:-1]
    return url
def is_acquired(ob):
    # Return true if this object is not a direct
    # subobject of its __parent__ object.
    # Relies on Zope Acquisition: aq_parent/aq_base unwrap acquisition
    # wrappers so the hasattr check tests real containment, not acquired
    # attributes.
    if not hasattr(ob, '__parent__'):
        return 0
    if hasattr(aq_base(aq_parent(ob)), absattr(ob.id)):
        return 0
    # The application root is never considered acquired.
    if hasattr(aq_base(ob), 'isTopLevelPrincipiaApplicationObject') and \
            ob.isTopLevelPrincipiaApplicationObject:
        return 0
    return 1
def urlbase(url, ftype=None, fhost=None):
    """Return a '/'-based url such as '/foo/bar', with scheme ("type"),
    host and port removed.  *ftype*/*fhost* are accepted for backward
    compatibility and ignored."""
    parts = list(urlparse(url))
    parts[0] = ''  # drop scheme
    parts[1] = ''  # drop netloc (host:port)
    return urlunparse(parts) or '/'
def isDavCollection(object):
    """Return true if object is a DAV collection."""
    try:
        return object.__dav_collection__
    except AttributeError:
        return 0
def tokenFinder(token):
    """Extract the token part of a lock-token string such as
    '<opaquelocktoken:abcd>' and return it ('abcd').

    Returns None for an empty value or an ETag ('[...]').
    """
    if not token:
        return None  # an empty string was passed in
    first = token[0]
    if first == '[':
        return None  # an ETag was passed in
    if first == '<':
        token = token[1:-1]  # drop the angle brackets
    return token[token.find(':') + 1:]
# If: header handling support. IfParser returns a sequence of
# TagList objects in the order they were parsed which can then
# be used in WebDAV methods to decide whether an operation can
# proceed or to raise HTTP Error 412 (Precondition failed)
# IfHdr matches one optional <resource> followed by a parenthesised list.
IfHdr = re.compile(
    r"(?P<resource><.+?>)?\s*\((?P<listitem>[^)]+)\)"
)
# ListItem matches an optional 'not' plus either a <state-token> or an
# [etag] inside one parenthesised list.
ListItem = re.compile(
    r"(?P<not>not)?\s*(?P<listitem><[a-zA-Z]+:[^>]*>|\[.*?\])",
    re.I)
class TagList(object):
    """Holder for one parenthesised condition list parsed from an If: header."""

    def __init__(self):
        # Resource URL (without angle brackets) the list applies to, or None.
        self.resource = None
        # Raw state tokens / etags found in the list.
        self.list = []
        # 1 when the list was prefixed with 'not', else 0.
        self.NOTTED = 0
def IfParser(hdr):
    """Parse an If: header value into a list of TagList objects, in the
    order the parenthesised lists appear."""
    tags = []
    pos = 0
    while True:
        match = IfHdr.search(hdr[pos:])
        if match is None:
            return tags
        pos += match.end()
        tag = TagList()
        tag.resource = match.group('resource')
        if tag.resource:  # we need to delete < >
            tag.resource = tag.resource[1:-1]
        listitem = match.group('listitem')
        tag.NOTTED, tag.list = ListParser(listitem)
        tags.append(tag)
def ListParser(listitem):
    """Split one parenthesised If: list into its tokens.

    Returns (NOTTED, tokens) where NOTTED is 1 when any item in the list
    was prefixed with 'not'.
    """
    tokens = []
    notted = 0
    pos = 0
    while True:
        match = ListItem.search(listitem[pos:])
        if match is None:
            break
        pos += match.end()
        tokens.append(match.group('listitem'))
        if match.group('not'):
            notted = 1
    return notted, tokens
|
[
"leanhvu86@gmail.com"
] |
leanhvu86@gmail.com
|
cccc8870f7ed30c693be4991c997bd40760e5ee8
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_180/ch62_2019_10_02_15_14_58_527777.py
|
b4ad3aad05eeb567a0a0710d004c18a93d56a9fd
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
def filtra_positivos(lista):
    """Return a new list with only the strictly positive items of *lista*,
    preserving their order.

    Fix: the original ended with ``returnn lista_positivos`` -- a typo that
    made the function a SyntaxError.  Rewritten as a comprehension.
    """
    return [i for i in lista if i > 0]
|
[
"you@example.com"
] |
you@example.com
|
d8a55ec8bdd74b0f3ae4fc16b7c292a0b5ab4452
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/python/estimator/canned/linear.py
|
a870fe7a1fe83f1323f5d1b7383d5c93f2edf5e8
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:e4a29da17634359c96032259089138e261570186b23d8d3ede31721e341ba111
size 1310
|
[
"github@cuba12345"
] |
github@cuba12345
|
8881a9b4109aac6cccaa8aad8b8db98a4aecf08a
|
c0f4104194a7989e44d7f0161b2425c5a5bc3a98
|
/tacker/agent/linux/daemon.py
|
e60c6d9210e2a6984c2d5f8ba4f96c7331599496
|
[] |
no_license
|
bopopescu/Openstack-2
|
f65470bdd0ee4736c45b6f869f0453cb8eb446c8
|
6f06133562e3dfd490695a92c9ddf1a322675104
|
refs/heads/master
| 2022-11-28T09:19:21.633850
| 2016-06-23T07:55:32
| 2016-06-23T07:55:32
| 282,095,817
| 0
| 0
| null | 2020-07-24T01:44:49
| 2020-07-24T01:44:48
| null |
UTF-8
|
Python
| false
| false
| 4,324
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import fcntl
import os
import signal
import sys
from tacker.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Pidfile(object):
def __init__(self, pidfile, procname, uuid=None):
self.pidfile = pidfile
self.procname = procname
self.uuid = uuid
try:
self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
LOG.exception(_("Error while handling pidfile: %s"), pidfile)
sys.exit(1)
def __str__(self):
return self.pidfile
def unlock(self):
if not not fcntl.flock(self.fd, fcntl.LOCK_UN):
raise IOError(_('Unable to unlock pid file'))
def write(self, pid):
os.ftruncate(self.fd, 0)
os.write(self.fd, "%d" % pid)
os.fsync(self.fd)
def read(self):
try:
pid = int(os.read(self.fd, 128))
os.lseek(self.fd, 0, os.SEEK_SET)
return pid
except ValueError:
return
def is_running(self):
pid = self.read()
if not pid:
return False
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
exec_out = f.readline()
return self.procname in exec_out and (not self.uuid or
self.uuid in exec_out)
except IOError:
return False
class Daemon(object):
    """A generic daemon class.

    Usage: subclass the Daemon class and override the run() method
    """

    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null',
                 stderr='/dev/null', procname='python', uuid=None):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.procname = procname
        # Acquires the pidfile lock immediately (exits on failure).
        self.pidfile = Pidfile(pidfile, procname, uuid)

    def _fork(self):
        # Fork and let the parent exit; on failure, log and exit.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)
        except OSError:
            LOG.exception(_('Fork failed'))
            sys.exit(1)

    def daemonize(self):
        """Daemonize process by doing Stevens double fork."""
        # fork first time
        self._fork()

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # fork second time
        self._fork()

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        stdin = open(self.stdin, 'r')
        stdout = open(self.stdout, 'a+')
        # NOTE(review): buffering=0 on a text-mode file is Python 2 only.
        stderr = open(self.stderr, 'a+', 0)
        os.dup2(stdin.fileno(), sys.stdin.fileno())
        os.dup2(stdout.fileno(), sys.stdout.fileno())
        os.dup2(stderr.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delete_pid)
        signal.signal(signal.SIGTERM, self.handle_sigterm)
        self.pidfile.write(os.getpid())

    def delete_pid(self):
        # atexit hook: remove the pidfile on clean shutdown.
        os.remove(str(self.pidfile))

    def handle_sigterm(self, signum, frame):
        # SIGTERM triggers a clean exit (which runs the atexit hooks).
        sys.exit(0)

    def start(self):
        """Start the daemon."""
        if self.pidfile.is_running():
            self.pidfile.unlock()
            message = _('Pidfile %s already exist. Daemon already running?')
            LOG.error(message, self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    def run(self):
        """Override this method when subclassing Daemon.

        start() will call this method after the process has daemonized.
        """
        pass
|
[
"egonmin@CN00119199"
] |
egonmin@CN00119199
|
8ee2f1b168cb673bb9e1196e8e8507088a55e75b
|
7300fc72162568f886e04509431359a62a09da79
|
/lino_xl/lib/phones/mixins.py
|
cfc73974e2bfd9b6033a7d3015cfbcb1ca35f494
|
[
"BSD-2-Clause"
] |
permissive
|
forexblog/xl
|
ad27aa1e9f5669f8a78ec55f4b7d0bd952da6327
|
130303647d01c0d8271f770f3054907c183dc1e8
|
refs/heads/master
| 2023-03-04T01:44:39.485452
| 2021-02-13T08:18:16
| 2021-02-13T08:18:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,590
|
py
|
# Copyright 2017-2019 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from etgen.html import E, join_elems
from lino.api import rt, dd, _
from lino.core.diff import ChangeWatcher
from lino.mixins import Contactable, Phonable
from .choicelists import ContactDetailTypes
class ContactDetailsOwner(Contactable, Phonable):
    """Mixin for models that own a set of contact details.

    When the 'phones' plugin is installed, edits to the owner's own
    contact fields (email, phone, ...) are mirrored into ContactDetail
    rows; otherwise the fields are simply displayed inline.
    """

    class Meta:
        abstract = True

    if dd.is_installed('phones'):

        def after_ui_save(self, ar, cw):
            # Mirror changed contact fields into ContactDetail rows.
            if cw is None:  # it's a new instance
                for cdt in ContactDetailTypes.get_list_items():
                    self.propagate_contact_detail(cdt)
                pass
            else:
                # Only propagate the detail types whose field changed.
                for k, old, new in cw.get_updates():
                    cdt = ContactDetailTypes.find(field_name=k)
                    # cdt = getattr(ContactDetailTypes, k, False)
                    if cdt:
                        self.propagate_contact_detail(cdt)
            super(ContactDetailsOwner, self).after_ui_save(ar, cw)

        def propagate_contact_detail(self, cdt):
            """Sync the primary ContactDetail of type *cdt* with the value
            of the corresponding owner field: create/update it when the
            field is set, delete it when the field was cleared."""
            k = cdt.field_name
            if k:
                value = getattr(self, k)
                ContactDetail = rt.models.phones.ContactDetail
                kw = dict(partner=self, primary=True, detail_type=cdt)
                try:
                    cd = ContactDetail.objects.get(**kw)
                    if value:
                        cd.value = value
                        # don't full_clean() because no need to check
                        # primary of other items
                        cd.save()
                    else:
                        cd.delete()
                except ContactDetail.DoesNotExist:
                    if value:
                        kw.update(value=value)
                        cd = ContactDetail(**kw)
                        # self.phones_by_partner.add(cd, bulk=False)
                        cd.save()

        def propagate_contact_details(self, ar=None):
            """Propagate every contact detail type and notify the client."""
            watcher = ChangeWatcher(self)
            for cdt in ContactDetailTypes.get_list_items():
                self.propagate_contact_detail(cdt)
            if ar is not None:
                watcher.send_update(ar)

        def get_overview_elems(self, ar):
            # elems = super(ContactDetailsOwner, self).get_overview_elems(ar)
            yield rt.models.phones.ContactDetailsByPartner.get_table_summary(
                self, ar)

        @dd.displayfield(_("Contact details"))
        def contact_details(self, ar):
            # Render the active (not yet ended) details as one paragraph.
            if ar is None:
                return ''
            sar = rt.models.phones.ContactDetailsByPartner.request(parent=ar, master_instance=self)
            items = [o.detail_type.as_html(o, sar)
                     for o in sar if not o.end_date]
            return E.p(*join_elems(items, sep=', '))

    else:
        # Fallback when the 'phones' plugin is not installed: no mirroring,
        # just format the owner's own fields.

        def get_overview_elems(self, ar):
            return []

        @dd.displayfield(_("Contact details"))
        def contact_details(self, ar):
            # if ar is None:
            #     return ''
            items = []
            for cdt in ContactDetailTypes.get_list_items():
                if cdt.field_name:
                    value = getattr(self, cdt.field_name)
                    if value:
                        items.append(cdt.format(value))
            # items.append(ContactDetailTypes.email.format(self.email))
            # # items.append(E.a(self.email, href="mailto:" + self.email))
            # items.append(self.phone)
            # items.append(E.a(self.url, href=self.url))
            return E.p(*join_elems(items, sep=', '))
|
[
"luc.saffre@gmail.com"
] |
luc.saffre@gmail.com
|
2e7b9dada3d2c6d1f5775277b7fedd5aaa57321b
|
c29b838371729ac04744b40d486f0b55212990b6
|
/Spider-Learn/Spider/chapter4_analyse_library_pyquery.py
|
5ec6b154f29be6291fe4c1e9b4b48b87708a9f36
|
[] |
no_license
|
Sugarsugarzz/PyPractice
|
93c3155a94d162c9eabf0d1a641d28bc6d639c22
|
d91b7d6ca996792fe409c08862fa9da5b1dc319b
|
refs/heads/master
| 2023-02-13T01:51:24.909947
| 2021-01-20T02:57:22
| 2021-01-20T02:57:22
| 163,177,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,717
|
py
|
#****** 4.3 Using pyquery ******
# Well suited to scraping tasks that lean heavily on CSS selectors.
# 1. Installation
# pip3 install pyquery
import pyquery
# Import PyQuery under the short alias pq
from pyquery import PyQuery as pq
# 2. Initialization
# PyQuery is initialized by passing it a single argument
# * Initializing from an HTML string
html = '''
<div>
    <ul>
         <li class="item-0">first item</li>
         <li class="item-1"><a href="link2.html">second item</a></li>
         <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
         <li class="item-1 active"><a href ="link4.html">fourth item</a></li>
         <li class="item-0"><a href="link5.html">fifth item</a></li>
     </ul>
 </div
'''
# NOTE(review): the closing tag above reads "</div" (missing ">");
# pyquery's parser tolerates it, but confirm it is intentional.
doc = pyquery.PyQuery(html)
# Same thing with the shorter alias
doc = pq(html)
print(doc('li'))
# * Initializing from a URL
# doc = pq(url='https://cuiqingcai.com')
print(doc('title'))
# * Initializing from a file (requires test.html in the working directory)
doc = pq(filename='test.html')
print(doc('li'))
# 3. Basic CSS selectors
# Selections return PyQuery objects
# Example
html = '''
<div id="container">
    <ul class="list">
         <li class="item-0">first item</li>
         <li class="item-1"><a href="link2.html">second item</a></li>
         <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
         <li class="item-1 active"><a href ="link4.html">fourth item</a></li>
         <li class="item-0"><a href="link5.html">fifth item</a></li>
     </ul>
 </div>
'''
doc = pq(html)
# "#container" selects the element whose id is "container"
# ".list" selects elements whose class is "list"
# "li" selects li nodes directly
print(doc('#container .list li'))
print(type(doc('#container .list li')))
# 4. Finding nodes
# * Child nodes
# find() takes a CSS selector and searches for matching nodes
# find() covers all descendants; use children() for direct children only
doc = pq(html)
items = doc('.list')
print(type(items))
print(items)
lis = items.find('li')
print(type(lis))
print(lis)
lis = items.children('.active')
print(type(lis))
print(lis)
# * Parent nodes
# parent() returns the direct parent node
html = '''
<div class="wrap">
    <div id="container">
        <ul class="list">
             <li class="item-0">first item</li>
             <li class="item-1"><a href="link2.html">second item</a></li>
             <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
             <li class="item-1 active"><a href ="link4.html">fourth item</a></li>
             <li class="item-0"><a href="link5.html">fifth item</a></li>
         </ul>
     </div>
 </div>
'''
doc = pq(html)
items = doc('.list')
container = items.parent()
print(type(container))
print(container)
# parents() returns every ancestor node
container = items.parents()
print(type(container))
print(container)
# Pass a CSS selector to filter for a particular ancestor
container = items.parents('.wrap')
print(type(container))
print(container)
# * Sibling nodes
# siblings() returns all sibling nodes; it also accepts a CSS selector
doc = pq(html)
li = doc('.list .item-0.active')
print(li.siblings())
# * Traversal
# A single node can be printed directly or converted to a string
doc = pq(html)
li = doc('.item-0.active')
print(li)
print(str(li))
# For multiple nodes, iterate with items(), which returns a generator
doc = pq(html)
lis = doc('li').items() # lis is a generator
print(type(lis))
for li in lis:
    print(li, type(li))
# 6. Extracting information
# * Getting attributes
# attr() fetches an attribute by name
doc = pq(html)
a = doc('.item-0.active a')
print(a)
print(a.attr('href'))
# The attr property offers attribute-style access
print(a.attr.href)
# attr() only reads the first matched node; iterate to read every a node
doc = pq(html)
a = doc('a')
for item in a.items():
    print(item.attr('href'))
# * Getting text
# Summary: html() returns the inner HTML of the first node only (iterate
# for all nodes); text() merges the text of every matched node into one
# string, so no iteration is needed.
# Call text() to get the inner text of a node;
# all inner HTML tags are ignored and only plain text is returned.
html = '''
<div class="wrap">
    <div id="container">
        <ul class="list">
             <li class="item-0">first item</li>
             <li class="item-1"><a href="link2.html">second item</a></li>
             <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
             <li class="item-1 active"><a href ="link4.html">fourth item</a></li>
             <li class="item-0"><a href="link5.html">fifth item</a></li>
         </ul>
     </div>
 </div>
'''
doc = pq(html)
a = doc('.item-0.active')
# NOTE(review): the selection above is bound to "a" but the prints below
# reuse the stale "li" from the earlier loop — probably meant li = doc(...).
print(li)
print(li.text())
# Call html() to get the node's inner HTML
li = doc('.item-0.active')
print(li)
print(li.html())
# 7. Manipulating nodes
# Nodes can be modified dynamically, e.g. adding a class or removing a node
# * addClass and removeClass
html = '''
<div class="wrap">
    <div id="container">
        <ul class="list">
             <li class="item-0">first item</li>
             <li class="item-1"><a href="link2.html">second item</a></li>
             <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
             <li class="item-1 active"><a href ="link4.html">fourth item</a></li>
             <li class="item-0"><a href="link5.html">fifth item</a></li>
         </ul>
     </div>
 </div>
'''
doc = pq(html)
li = doc('.item-0.active')
print(li)
li.remove_class('active')
print(li)
li.add_class('active')
print(li)
# * attr, text and html
html = '''
<ul class="list">
     <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
</ul>
'''
doc = pq(html)
li = doc('.item-0.active')
print(li)
li.attr('name', 'link')
print(li)
li.text('changed item')
print(li)
li.html('<span>changed item</span>')
print(li)
# * remove()
html = '''
<div class="wrap">
    Hello, World
<p>This is a paragraph.</p>
</div>
'''
doc = pq(html)
wrap = doc('.wrap')
print(wrap.text())
# Keep only "Hello, World" by removing the <p> child first
wrap.find('p').remove()
print(wrap.text())
# 8. Pseudo-class selectors
# CSS selectors are powerful partly because of their rich pseudo-class support
html = '''
<div class="wrap">
    <div id="container">
        <ul class="list">
             <li class="item-0">first item</li>
             <li class="item-1"><a href="link2.html">second item</a></li>
             <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
             <li class="item-1 active"><a href ="link4.html">fourth item</a></li>
             <li class="item-0"><a href="link5.html">fifth item</a></li>
         </ul>
     </div>
 </div>
'''
doc = pq(html)
# Select the first li node
li = doc('li:first-child')
print(li)
# Select the last li node
li = doc('li:last-child')
print(li)
# Select the second li node
li = doc('li:nth-child(2)')
print(li)
# Select the li nodes after the third one
li = doc('li:gt(2)')
print(li)
# Select li nodes at even positions
li = doc('li:nth-child(2n)')
print(li)
# Select li nodes whose text contains "second"
li = doc('li:contains(second)')
print(li)
|
[
"406857586@qq.com"
] |
406857586@qq.com
|
c46eea6ac70388e3126470a5470b481d84d8b08e
|
a7b66311c2ce113789933ec3162f1128b2862f13
|
/app/waterQual/EPA/ntnModel/wrapCl.py
|
1059b5ef803b609b8ac5c26f307b198b30e4359e
|
[
"MIT"
] |
permissive
|
ChanJeunlam/geolearn
|
214b2c42359ea1164b39117fad2d7470adeb6d35
|
791caa54eb70920823ea7d46714dc8a3e7fa7445
|
refs/heads/master
| 2023-07-16T04:13:15.526364
| 2021-08-16T05:24:18
| 2021-08-16T05:24:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,772
|
py
|
import os
import time
import pandas as pd
import numpy as np
import json
from hydroDL import kPath
from hydroDL.data import usgs, gageII, gridMET, transform
# Build weekly training arrays for the 'chloride' case: for each USGS site,
# assemble rho-week windows of streamflow (q) and forcing+precip-chemistry
# (f), paired with the sampled concentration (c) and site constants (g),
# then save everything as an .npz plus a .csv/.json description.
# varC = usgs.varC
varC = ['00940']
siteNoLst = ['0422026250', '04232050', '0423205010']
nFill = 3
varG = gageII.lstWaterQuality
caseName = 'chloride'
# add a start/end date to improve efficiency.
t = pd.date_range(start='1979-01-01', end='2019-12-30', freq='W-TUE')
sd = t[0]
ed = t[-1]
td = pd.date_range(sd, ed)
rho = 50
# temp: read NTN (atmospheric deposition) data for station NY43
dirNTN = os.path.join(kPath.dirData, 'EPA', 'NTN')
fileData = os.path.join(dirNTN, 'NTN-All-w.csv')
fileSite = os.path.join(dirNTN, 'NTNsites.csv')
tabData = pd.read_csv(fileData)
tabSite = pd.read_csv(fileSite)
tabData['siteID'] = tabData['siteID'].apply(lambda x: x.upper())
# NOTE(review): -9 is treated as the NTN missing-data sentinel here —
# confirm against the NTN file documentation.
tabData = tabData.replace(-9, np.nan)
tab = tabData[tabData['siteID'] == 'NY43']
tab.index = pd.to_datetime(tab['dateon'])
weekday = tab.index.normalize().weekday
tab2 = pd.DataFrame(index=t)
# Align NTN samples to the weekly Tuesday grid, allowing +/- 3 days slack.
tol = pd.Timedelta(3, 'D')
tab2 = pd.merge_asof(left=tab2, right=tab, right_index=True,
                     left_index=True, direction='nearest', tolerance=tol)
varPLst = ['ph', 'Conduc', 'Ca', 'Mg', 'K', 'Na', 'NH4', 'NO3', 'Cl', 'SO4']
dfP = tab2[varPLst]
# gageII site constants
tabG = gageII.readData(varLst=varG, siteNoLst=siteNoLst)
tabG = gageII.updateCode(tabG)
# read data and merge to: f/q=[nT,nP,nX], g/c=[nP,nY]
fLst = list()  # forcing ts
pLst = list()  # concentrations in rainfall
gLst = list()  # geo-const
qLst = list()  # streamflow
cLst = list()  # water quality
# cfLst = list()  # water quality flags
infoLst = list()
t0 = time.time()
for i, siteNo in enumerate(siteNoLst):
    t1 = time.time()
    dfC = usgs.readSample(siteNo, codeLst=varC, startDate=sd)
    dfQ = usgs.readStreamflow(siteNo, startDate=sd)
    dfF = gridMET.readBasin(siteNo)
    # merge to one table
    df = pd.DataFrame({'date': td}).set_index('date')
    df = df.join(dfC)
    df = df.join(dfQ)
    df = df.join(dfF)
    df = df.rename(columns={'00060_00003': '00060'})
    # convert to weekly
    # NOTE(review): pd.offsets.timedelta and the resample loffset= keyword
    # only exist on older pandas versions — confirm the pinned version.
    offset = pd.offsets.timedelta(days=-6)
    dfW = df.resample('W-MON', loffset=offset).mean()
    dfW = dfW.join(dfP)
    dfC = dfW[varC].dropna(how='all')
    for k in range(len(dfC)):
        ct = dfC.index[k]
        # rho-week window ending at the sample date; skip windows that
        # extend outside the [sd, ed] study period.
        ctR = pd.date_range(
            start=ct-pd.Timedelta(days=rho*7-1), end=ct, freq='W-TUE')
        if (ctR[0] < sd) or (ctR[-1] > ed):
            continue
        tempQ = pd.DataFrame({'date': ctR}).set_index('date').join(
            dfW['00060']).interpolate(limit=nFill, limit_direction='both')
        tempF = pd.DataFrame({'date': ctR}).set_index('date').join(
            dfW[gridMET.varLst+varPLst]).interpolate(limit=nFill, limit_direction='both')
        qLst.append(tempQ.values)
        fLst.append(tempF.values)
        cLst.append(dfC.iloc[k].values)
        gLst.append(tabG.loc[siteNo].values)
        infoLst.append(dict(siteNo=siteNo, date=ct))
    t2 = time.time()
    print('{} on site {} reading {:.3f} total {:.3f}'.format(
        i, siteNo, t2-t1, t2-t0))
# Stack window lists into [nT, nP, nX] (q, f) and [nP, nY] (g, c) arrays.
q = np.stack(qLst, axis=-1).swapaxes(1, 2).astype(np.float32)
f = np.stack(fLst, axis=-1).swapaxes(1, 2).astype(np.float32)
g = np.stack(gLst, axis=-1).swapaxes(0, 1).astype(np.float32)
c = np.stack(cLst, axis=-1).swapaxes(0, 1).astype(np.float32)
infoDf = pd.DataFrame(infoLst)
saveFolder = os.path.join(kPath.dirWQ, 'trainData')
saveName = os.path.join(saveFolder, caseName)
np.savez(saveName, q=q, f=f, c=c, g=g)
infoDf.to_csv(saveName+'.csv')
dictData = dict(name=caseName, rho=rho, nFill=nFill,
                varG=varG, varC=varC, varQ=['00060'],
                varF=gridMET.varLst+varPLst, siteNoLst=siteNoLst)
with open(saveName+'.json', 'w') as fp:
    json.dump(dictData, fp, indent=4)
|
[
"geofkwai@gmail.com"
] |
geofkwai@gmail.com
|
c4a71d58b51d50f238a0fcfefb454888e76cbac3
|
c3cf442e56969e98fbd392ee89bd85b3e22d5cd2
|
/python/Spider/github_login2.py
|
ddd07e038a2e21d86bbac1715e399e0fe3d6860d
|
[] |
no_license
|
Eacaen/diff_Code_Learn
|
bd4bd409f0027ab3d606ef029de9ae4a3af09775
|
e55619c5736181fd50666b61d06e6ed7cafc4394
|
refs/heads/master
| 2021-01-12T07:55:54.127018
| 2019-11-07T10:42:05
| 2019-11-07T10:42:05
| 77,052,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,942
|
py
|
# -*- coding:utf-8 -*-
import requests
import re
session = requests.Session()
# NOTE(review): a previous hard-coded browser header dict (with stale
# cookies) was commented out here; consider deleting it from the repo.
# Browser-captured request headers.  The Cookie/_gh_sess values are stale
# hard-coded session state — confirm whether they are still needed at all;
# requests.Session manages cookies automatically.
header = {
	"Accept" : "text/html,application/xhtml+x…lication/xml;q=0.9,*/*;q=0.8" ,
	"Accept-Encoding" : "gzip, deflate, br",
	"Accept-Language" : "en-US,en;q=0.5",
	"Connection" : "keep-alive",
	"Cookie" : "logged_in=no; _octo=GH1.1.1970970484.1500426888; _ga=GA1.2.1727967677.1500426888; _gh_sess=eyJsYXN0X3dyaXRlIjoxNTAxMjMyMzg5MDEyLCJzZXNzaW9uX2lkIjoiZThiNTIxZmFhYjdiNWMzZTVjNTY2YWY4MmU5MWJjNWQiLCJjb250ZXh0IjoiLyIsImxhc3RfcmVhZF9mcm9tX3JlcGxpY2FzIjoxNTAxMjMyMzkyMTEzLCJyZWZlcnJhbF9jb2RlIjoiaHR0cHM6Ly9naXRodWIuY29tLyIsIl9jc3JmX3Rva2VuIjoiQ2JkYjAxSGREZTVtcnJZU29GQ29aYzNabHZjWitCQmN6WFdKcDEwV2thaz0iLCJmbGFzaCI6eyJkaXNjYXJkIjpbXSwiZmxhc2hlcyI6eyJhbmFseXRpY3NfbG9jYXRpb25fcXVlcnlfc3RyaXAiOiJ0cnVlIn19fQ%3D%3D--59c4346f810a2bd6b496962bda680907c92ba032; tz=Asia%2FShanghai; _gat=1",
	"Host" : "github.com" ,
	"Upgrade-Insecure-Requests" : "1",
	"User-Agent" :"Mozilla/5.0 (X11; Ubuntu; Lin… Gecko/20100101 Firefox/54.0" ,
	"Content-Type" : "application/x-www-form-urlencoded",
	# "Content-Length" : "182",
	"Referer" : "https://github.com",
}
def getToken():
	"""Fetch GitHub's login page and scrape the CSRF authenticity token.

	NOTE(review): Python 2 code (print statement, html.content used as a
	str); the regex scrape is brittle — confirm against the current
	login-page markup before relying on it.
	"""
	html = session.get('https://github.com/login', headers=header)
	pattern = re.compile(r'<input name="authenticity_token" type="hidden" value="(.*)" />')
	authenticity_token = pattern.findall(html.content)[0]
	print authenticity_token
	return authenticity_token
def userpwdLogin():
	"""POST the login form (username/password + CSRF token) to GitHub.

	SECURITY NOTE(review): credentials are hard-coded below and committed
	to version control — they should be revoked and moved to environment
	variables or a secrets store.
	"""
	payload = {
		"login" : "Eacaen",
		"password" : "HTy119110315",
		'commit': 'Sign+in',
		'authenticity_token': getToken(),
		'utf8': '%E2%9C%93'}
	r = session.post('https://github.com/session', data=payload, headers=header)
	print r.status_code
	print r.content #login success
userpwdLogin()
|
[
"501556184@qq.com"
] |
501556184@qq.com
|
dab468facc509b0bc4a17bf71d78d2f64e565972
|
0689ad04900b45e6ffb85756e65e96f30781558b
|
/py44/数据/day06/demo03_vectorize.py
|
53433c685f1e8058eb2bb0adb205b8acc6cb2766
|
[] |
no_license
|
lizhihui16/aaa
|
a5452b5d0de4c2ad6342fce1b8aef278d2d2943e
|
e8c38e012f6aa0bc05ac6481d6c3e2b4e9013b56
|
refs/heads/master
| 2020-04-24T01:05:19.266060
| 2019-02-20T01:43:51
| 2019-02-20T01:43:51
| 171,586,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
'''
vectorize矢量化案例
'''
import numpy as np
import math as m
def foo(x, y):
return m.sqrt(x**2 + y**2)
x, y = 3, 4
print(foo(x, y))
x = np.array([3, 4, 5, 6])
y = np.array([4, 5, 6, 7])
# z = foo(x, y) 错误
# 把foo函数矢量化处理
foo_v = np.vectorize(foo)
print(foo_v(x, y))
# 使用frompyfunc方法矢量化函数
# foo需要2个参数, 最终将会有1个返回值
foo_f = np.frompyfunc(foo, 2, 1)
print(foo_f(x, y))
|
[
"tarena@tedu.cn"
] |
tarena@tedu.cn
|
9012a4c3c7502633f1df59574ab7602af3edaaeb
|
533c298a21e865d190e69b0c95a0f9ecd9dd8d8b
|
/reviewboard/__init__.py
|
016a72ad23705cdef9d791d57c91bf2bda9806f0
|
[
"MIT"
] |
permissive
|
djs/reviewboard
|
cb78573890b821cbc228fb43a1bdb8e337d5e9d5
|
813158fbb31d7889e224f3fc1350fd4a791874ec
|
refs/heads/master
| 2021-01-15T22:41:24.101928
| 2009-09-23T09:54:41
| 2009-09-23T09:54:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
# The version of Review Board.
#
# This is in the format of:
#
#   (Major, Minor, Micro, alpha/beta/rc/final, Release Number, Released)
#
VERSION = (1, 1, 0, 'alpha', 2, False)


def get_version_string():
    """Return the human-readable version, e.g. "1.1 alpha 2 (dev)"."""
    major, minor, micro, tag, num, _released = VERSION
    version = '%s.%s' % (major, minor)
    # A zero micro version is omitted entirely.
    if micro:
        version += ".%s" % micro
    # Pre-release builds carry their tag and release number.
    if tag != 'final':
        version += (' RC%s' % num) if tag == 'rc' else (' %s %s' % (tag, num))
    if not is_release():
        version += " (dev)"
    return version


def get_package_version():
    """Return the packaging-style version, e.g. "1.1alpha2" (no spaces)."""
    major, minor, micro, tag, num = VERSION[:5]
    version = '%s.%s' % (major, minor)
    if micro:
        version += ".%s" % micro
    if tag != 'final':
        version += '%s%s' % (tag, num)
    return version


def is_release():
    """Return whether this build is an official release."""
    return VERSION[5]
def initialize():
    """Begins initialization of Review Board.
    This sets up the logging, generates cache serial numbers, and then
    fires an initializing signal that other parts of the codebase can
    connect to. This must be called for such features as e-mail notification
    to work.
    """
    # Imported lazily — presumably to avoid import cycles and side effects
    # at module load time; confirm before hoisting to module level.
    import logging
    import os
    from djblets.util.misc import generate_cache_serials
    from djblets import log
    from reviewboard import signals
    # Set up logging.
    log.init_logging()
    logging.info("Log file for Review Board v%s (PID %s)" %
                 (get_version_string(), os.getpid()))
    # Generate cache serials
    generate_cache_serials()
    # Let subscribers (e-mail notification, etc.) hook into startup.
    signals.initializing.send(sender=None)
|
[
"chipx86@chipx86.com"
] |
chipx86@chipx86.com
|
4e0e82716584b6f00ebb5773c0e041000aa55a11
|
1d9138d777744fa2d9d6e3b629a43041f2358d06
|
/real_time/abc/118/B.py
|
0cbaafe88c142600626cc3f11fe6341f0d44f97f
|
[] |
no_license
|
Yuyats/AtCoderAnswers
|
f1956b790ee64a4d0b3b48b98791a91679a30244
|
fac7e3eb74a888e77ba7a6b6a15d836c589baa3e
|
refs/heads/master
| 2021-06-24T16:19:45.848524
| 2021-06-13T03:51:07
| 2021-06-13T03:51:07
| 198,857,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
import math, string, itertools, fractions, heapq, collections, re, array, bisect, sys, random, time, copy, functools
# Competitive-programming boilerplate: deep recursion limit, a large
# "infinity", float tolerance, the usual modulus, and 4-/8-neighbour
# direction offsets for grid problems.
sys.setrecursionlimit(10**7)
inf = 10 ** 20
eps = 1.0 / 10**10
mod = 10**9+7
dd = [(-1, 0), (0, 1), (1, 0), (0, -1)]
ddn = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]
# One-line stdin readers: LI = list of ints, LI_ = ints minus one,
# LF = floats, LS = raw tokens, I/F = single int/float, S = one line,
# pf = print with immediate flush.
def LI(): return [int(x) for x in sys.stdin.readline().split()]
def LI_(): return [int(x)-1 for x in sys.stdin.readline().split()]
def LF(): return [float(x) for x in sys.stdin.readline().split()]
def LS(): return sys.stdin.readline().split()
def I(): return int(sys.stdin.readline())
def F(): return float(sys.stdin.readline())
def S(): return input()
def pf(s): return print(s, flush=True)
def main():
    """Read N rows of "K a_1 .. a_K" and print how many values in the
    first row's tail also appear in every other row's tail.
    (AtCoder ABC118 B, judging by the file path — confirm.)  M is read
    but unused beyond the first line."""
    N, M = LI()
    KA = []
    [KA.append(LI()) for i in range(N)]
    result = 0
    # Element 0 of each row is the count K, so compare only the tails.
    for i in KA[0][1:]:
        if all([i in j[1:] for j in KA]):
            result += 1
    print(result)
main()
|
[
"unitednum@gmail.com"
] |
unitednum@gmail.com
|
8b2fb070f5bd9c5bfb41af82e806c1cdd09c1850
|
e764c69d09cb69653817df8fa410ce7a31dd5d1d
|
/residuosWebapp/residuos/models.py
|
95607856e983b103db5e578608aeadf61d6b7687
|
[] |
no_license
|
fafaschiavo/residuosWebbapp
|
5620d60a933e3894864c89de232ebebf11df6a5f
|
f1915bc1f136801e96c5bf01bd7d5127eddb8551
|
refs/heads/master
| 2021-01-20T17:12:27.770482
| 2016-08-14T22:26:03
| 2016-08-14T22:26:03
| 65,656,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class members(models.Model):
    # Identity and contact fields.
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    email = models.CharField(max_length=200)
    phone = models.CharField(max_length=200)
    # Set once at row creation, never updated afterwards.
    created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
    # Boolean-like flags stored as integers (1/0).
    newsletter = models.IntegerField(default=1)
    is_adm = models.IntegerField(default=0)
    # NOTE(review): plain integer rather than a ForeignKey to `company`;
    # confirm whether a real relation was intended.
    company_id = models.IntegerField(default=0)
    # NOTE(review): dunder-style accessor names are unconventional in
    # Django; attributes are normally accessed directly.
    def __first_name__(self):
        return self.first_name
    def __last_name__(self):
        return self.last_name
    def __email__(self):
        return self.email
    def __phone__(self):
        return self.phone
    def __created_at__(self):
        return self.created_at
    def __newsletter__(self):
        return self.newsletter
    def __is_adm__(self):
        return self.is_adm
class company(models.Model):
    # Company identity, contact and fiscal-id (CNPJ, Brazil) fields.
    company_name = models.CharField(max_length=400)
    email = models.CharField(max_length=200)
    phone = models.CharField(max_length=200)
    cnpj = models.CharField(max_length=200)
    # Set once at row creation, never updated afterwards.
    created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
    zip_code = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    # NOTE(review): dunder-style accessor names are unconventional in
    # Django; attributes are normally accessed directly.
    def __company_name__(self):
        return self.company_name
    def __email__(self):
        return self.email
    def __phone__(self):
        return self.phone
    def __cnpj__(self):
        return self.cnpj
    def __created_at__(self):
        return self.created_at
    def __zip_code__(self):
        return self.zip_code
    def __address__(self):
        return self.address
|
[
"fayschiavo@gmail.com"
] |
fayschiavo@gmail.com
|
33c85c8bf476a439d5dacd0afbbd365c0df5f844
|
fb65b7c000642dca68c93ee85a87795b3f30fe21
|
/Advance_Python/Quantifiers/Rule4.py
|
2c441191c03a5512a02d8b5f8398a98bb93e4222
|
[] |
no_license
|
toncysara17/luminarpythonprograms
|
f41b446251feba641e117d87ce235dc556086f8f
|
17bc37c3f83c0e9792aaa8bccd901371a6413f14
|
refs/heads/master
| 2023-04-17T18:51:31.493118
| 2021-04-20T05:25:02
| 2021-04-20T05:25:02
| 358,550,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
# Quantifiers: {n} matches exactly n repetitions of the preceding atom.
import re

# "a{3}" matches every non-overlapping run of three consecutive "a"s.
x = "a{3}"
r = "aaa abc aaaa cga"
# Report each match's starting offset and the matched text.
for found in re.finditer(x, r):
    print(found.start())
    print(found.group())
|
[
"toncysara12@gmail.com"
] |
toncysara12@gmail.com
|
02ca81936a9bbc323cdc7593087daf093dfe7a6a
|
dc0d7e49eafe40f1c41f631621a6ccdefdcbbf7c
|
/press/log.py
|
fd7a77624aba7893cf089a1fce44ac9a436ccd5f
|
[] |
no_license
|
jr0d/press
|
b2314b319da5b44d23110036064775796246c5c1
|
477b78700b644b2d333f4d9289f319a52fc54100
|
refs/heads/master
| 2021-06-15T20:44:18.061919
| 2019-04-24T17:01:37
| 2019-04-24T17:01:37
| 80,559,927
| 7
| 3
| null | 2021-03-25T21:49:09
| 2017-01-31T20:38:44
|
Python
|
UTF-8
|
Python
| false
| false
| 830
|
py
|
import logging
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'


def setup_logging(log_level=logging.ERROR,
                  console_logging=True,
                  log_file=None,
                  cli_debug=False):
    """Configure the 'press' logger hierarchy.

    :param log_level: level applied to the 'press' logger.
    :param console_logging: when True (default), attach a StreamHandler.
    :param log_file: optional path; when given, also log to this file.
    :param cli_debug: when False (default), silence 'press.helpers.cli'
        below ERROR.
    """
    press_logger = logging.getLogger('press')
    press_cli_logger = logging.getLogger('press.helpers.cli')
    formatter = logging.Formatter(fmt=FORMAT)
    if console_logging:  # True unless explicitly untrue
        stream_handler = logging.StreamHandler()
        # Fix: the console handler previously emitted bare messages while
        # the file handler used FORMAT; apply the same formatter to both.
        stream_handler.setFormatter(formatter)
        press_logger.addHandler(stream_handler)
    press_logger.setLevel(log_level)
    if log_file:
        fh = logging.FileHandler(log_file)
        fh.setFormatter(formatter)
        # Fix: attach the handler before announcing it, so the
        # "Setting log file" record also reaches the file it refers to.
        press_logger.addHandler(fh)
        press_logger.info('Setting log file: {}'.format(log_file))
    if not cli_debug:
        press_cli_logger.setLevel(logging.ERROR)
|
[
"jared.rodriguez@rackspace.com"
] |
jared.rodriguez@rackspace.com
|
6ab505a1ac637cbf578adba0cb1b1eb19c59b563
|
4ad94b71e30883d6df07a3277265bd6fb7457ba7
|
/python/examples/doc_examples/plot/axis_title_3d.py
|
e81c6c337eadf7fd0d7458a698deea9e1388cc48
|
[
"MIT"
] |
permissive
|
Tecplot/handyscripts
|
7cb1d4c80f323c785d06b0c8d37aeb0acb67f58c
|
84a89bfecff5479a0319f08eb8aa9df465283830
|
refs/heads/master
| 2023-08-22T15:29:22.629644
| 2023-08-12T01:19:59
| 2023-08-12T01:19:59
| 149,826,165
| 89
| 64
|
MIT
| 2022-01-13T01:11:02
| 2018-09-21T22:47:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
from os import path
import tecplot as tp
from tecplot.constant import PlotType, SurfacesToPlot, Color, AxisTitleMode
# Load the F18 example dataset and draw a 3D contour plot of variable 'S',
# then give each axis a custom-styled title and export a PNG.
examples_dir = tp.session.tecplot_examples_directory()
infile = path.join(examples_dir, 'SimpleData', 'F18.plt')
dataset = tp.data.load_tecplot(infile)
plot = tp.active_frame().plot(PlotType.Cartesian3D)
plot.activate()
plot.show_contour = True
plot.contour(0).variable = dataset.variable('S')
plot.contour(0).colormap_name = 'Sequential - Yellow/Green/Blue'
plot.contour(0).legend.show = False
plot.fieldmap(0).surfaces.surfaces_to_plot = SurfacesToPlot.BoundaryFaces
# The {DOC:highlight} markers below delimit the lines showcased in the
# published documentation example — keep them intact.
xaxis = plot.axes.x_axis
xaxis.show = True
#{DOC:highlight}[
xaxis.title.title_mode = AxisTitleMode.UseText
xaxis.title.text = 'Longitudinal (m)'
xaxis.title.color = Color.BluePurple
xaxis.title.position = 10
#]
yaxis = plot.axes.y_axis
yaxis.show = True
#{DOC:highlight}[
yaxis.title.title_mode = AxisTitleMode.UseText
yaxis.title.text = 'Transverse (m)'
yaxis.title.color = Color.BluePurple
yaxis.title.position = 90
#]
zaxis = plot.axes.z_axis
zaxis.show = True
#{DOC:highlight}[
zaxis.title.title_mode = AxisTitleMode.UseText
zaxis.title.text = 'Height (m)'
zaxis.title.color = Color.BluePurple
zaxis.title.offset = 13
#]
plot.view.fit()
tp.export.save_png('axis_title_3d.png', 600, supersample=3)
|
[
"55457608+brandonmarkham@users.noreply.github.com"
] |
55457608+brandonmarkham@users.noreply.github.com
|
5a9c07053f256cb8360b535a35fb9b97ed2bcae8
|
c652797f5303bb7102967fc6603e5704025afb36
|
/gamelayer/uitools/textline.py
|
179db7b1680cae5dd59791572dfc2579cff6863c
|
[
"MIT"
] |
permissive
|
Windspar/Gamelayer
|
fc1ce499cccb6530a4dcd446f9d86fd44026e564
|
65e1cf11548bc02bc49348eb265c209172c14844
|
refs/heads/master
| 2022-06-13T08:06:37.828771
| 2020-05-07T17:17:59
| 2020-05-07T17:17:59
| 258,047,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,569
|
py
|
import string
from pygame import KMOD_CTRL, Rect, KEYDOWN, MOUSEMOTION, MOUSEBUTTONDOWN
from .label import Label
from .ui_base import UI_Base
from .textline_core import *
class TextLine(UI_Base):
    """Single-line editable text widget with a blinking caret ("carrot")
    and horizontal scrolling/clipping of text wider than the widget."""
    def __init__(self, font, color, callback, rect, allowed_keys=None, *groups):
        UI_Base.__init__(self, rect, (0, 0), "topleft", *groups)
        self.callback = callback
        # _left/_right: indices of the visible slice of the buffer text;
        # _offset: pixel padding from the widget edge.
        self._left = 0
        self._right = 0
        self._offset = 2
        if allowed_keys:
            self.allowed_keys = allowed_keys
        else:
            self.allowed_keys = string.digits + string.ascii_letters + string.punctuation + " "
        self.recall = Recall()
        position = self.rect.x + self._offset, self.rect.centery
        self.carrot = Carrot("|", font, color, position)
        self.carrot.set_callback(self.build_image)
        self.buffer = Buffer(self.carrot, self.recall, callback)
        self.label = Label("", font, color, position, "midleft")
        self.label.set_apply_image(self.build_image)
    def bind(self, events):
        """Register this widget's handlers with the event dispatcher.
        NOTE(review): on_mousemotion is not defined in this class —
        presumably inherited from UI_Base; confirm."""
        events.bind(KEYDOWN, self.on_keydown)
        events.bind(MOUSEMOTION, self.on_mousemotion)
        events.bind(MOUSEBUTTONDOWN, self.on_mousebuttondown)
    def build_image(self, tool=None):
        """Re-render the widget surface (label then caret) and publish it."""
        self.image = self.build_surface()
        self.label.draw_to(self.image, self.rect)
        self.carrot.draw_to(self.image, self.rect)
        self.apply_image()
    def draw(self, surface):
        """Draw the label and caret directly onto *surface*."""
        self.label.draw(surface)
        self.carrot.draw(surface)
    def on_keydown(self, event):
        """Handle a key press while the widget has focus (_toggle is set):
        printable allowed characters are inserted at the caret; other keys
        are dispatched through the buffer's key_events table."""
        if self._toggle:
            self.carrot.show(True)
            ctrl = event.mod & KMOD_CTRL
            if ctrl == 0 and event.unicode in self.allowed_keys and event.unicode != "":
                self.buffer.insert(self.carrot.position, event.unicode)
                self.carrot.position += 1
                self.update_text()
            elif ctrl == 0:
                if event.key in self.buffer.key_events.keys():
                    self.buffer.key_events[event.key]()
                    self.update_text()
    def on_mousebuttondown(self, event):
        """Left click inside the widget focuses it and enables the caret;
        any other click (or a click elsewhere) removes focus."""
        self._toggle = False
        if event.button == 1:
            if self._hover:
                self._toggle = True
                if not self.carrot._enable:
                    self.carrot.enable(True)
        if not self._toggle:
            self.carrot.enable(False)
            self.apply_image()
    def update(self, delta):
        """Advance the caret's blink animation by *delta* time."""
        self.carrot.update(delta)
    def update_text(self):
        """Recompute the visible slice [_left:_right] around the caret,
        clip the label to the widget width, and reposition the caret."""
        if not self.buffer.empty():
            text = self.buffer.text
            font = self.label._font
            width = self.rect.width - self._offset * 3
            self.label.set_text(text)
            # Keep the caret inside the visible window.
            if self.carrot.position > self._right:
                self._right = self.carrot.position
            elif self.carrot.position < self._left:
                self._left = self.carrot.position
            # Looking for clipping text best fit. Base on carrot position
            # Move left position to the left.
            while font.size(text[self._left:self._right])[0] < width and self._left > 0:
                self._left -= 1
            # Move left position to the right.
            while font.size(text[self._left:self._right])[0] > width and self._left < self.carrot.position:
                self._left += 1
            # Move right position to right.
            while font.size(text[self._left:self._right])[0] < width and self._right < len(self.buffer):
                self._right += 1
            # Move right position to left.
            while font.size(text[self._left:self._right])[0] > width:
                self._right -= 1
            label_x = self.label.rect.x - 1
            x = font.size(text[0: self._left])[0]
            w = min(width, self.label.rect.width - x)
            # Smooth scrolling effect
            if w < width < self.label.rect.width:
                offset = width - (self.label.rect.width - x)
                x -= offset
                w += offset
                label_x += offset
            # Clip rect
            clip_rect = Rect(x, 0, w, self.label.rect.height)
            # Carrot position
            slice = text[self._left:self.carrot.position]
            self.carrot.rect.x = font.size(slice)[0] + label_x
            # Must set label clip rect after setting the carrot x position
            # so that the image updates correctly.
            self.label.clip(clip_rect)
        else:
            # Empty buffer: reset the caret to the left edge.
            self.carrot.rect.x = self.label.rect.x
            self.label.set_text("")
|
[
"kdrakemagi@gmail.com"
] |
kdrakemagi@gmail.com
|
20aed6156cab0fb01197eb7232f5d902cc34d1ae
|
5023f3f6f493a6cf3a6e4acf7ee742fdecc2a558
|
/ScopeFoundryHW/newport_esp300/esp300_xyz_stage_hw.py
|
ea5170c66505e93c87a73e26db3c8a6b14c200da
|
[
"BSD-3-Clause"
] |
permissive
|
erictang000/stackbot
|
1a0de1a30c0b17a67808cbb7f084149f0c744070
|
e9a20930d790c995163192b29394a266af54a3d0
|
refs/heads/master
| 2022-04-10T06:48:25.785204
| 2020-03-18T23:08:57
| 2020-03-18T23:08:57
| 248,362,086
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,673
|
py
|
from ScopeFoundry.hardware import HardwareComponent
class ESP300XYZStageHW(HardwareComponent):
    """ScopeFoundry hardware component for a Newport ESP300 XYZ stage:
    exposes per-axis position/target/enabled/is_moving settings and wires
    them to the serial driver on connect."""
    name = 'esp300_xyz_stage'
    def __init__(self, app, debug=False, name=None, ax_names='xyz'):
        """
        ax_names defines the names of the three axes connected to the stage.
        if an "_" underscore is found, that axis will be skipped.
        May be any iterable. examples include 'xyz' or ['r', 'theta', 'phi']
        """
        self.ax_names = ax_names
        HardwareComponent.__init__(self, app, debug=debug, name=name)
    def setup(self):
        """Declare the serial port setting and, per axis, the position,
        target-position, enabled, is_moving and step-delta settings."""
        self.settings.New('port', str, initial='COM5')
        for axis in self.ax_names:
            if axis == '_' or axis == None:
                continue
            self.settings.New(axis + "_position",
                              dtype=float,
                              ro=True,
                              unit='mm',
                              spinbox_decimals=6,
                              si=False
                              )
            #self.settings.New(axis + "_ref_position", dtype=float, ro=True, unit='nm')
            self.settings.New(axis + "_target_position",
                              dtype=float,
                              ro=False,
                              vmin=-20,
                              vmax=20,
                              unit='mm',
                              spinbox_decimals=6,
                              spinbox_step=0.01,
                              si=False)
            self.settings.New(axis + '_enabled', bool, initial=True)
            self.settings.New(axis + '_is_moving', bool, ro=True)
            self.settings.New(axis + "_step_delta", dtype=float, unit='m', si=True, initial=100e-6, vmin=0 )
    def connect(self):
        """Open the ESP300 serial driver and connect each axis's settings
        to the corresponding 1-based hardware axis number."""
        S = self.settings
        from .esp300_dev import ESP300
        E = self.esp300 = ESP300(port=S['port'], debug=S['debug_mode'])
        for axis_index, axis_name in enumerate(self.ax_names):
            axis_num = axis_index + 1
            # skip axes that are excluded from ax_names
            if axis_name == '_' or axis_name == None:
                continue
            # Adopt the unit reported by the controller for this axis.
            unit = E.read_unit(axis_num)
            self.settings.get_lq(axis_name + "_position").change_unit(unit)
            self.settings.get_lq(axis_name + "_target_position").change_unit(unit)
            # a=axis_num default binds the current axis number per lambda.
            self.settings.get_lq(axis_name + "_position").connect_to_hardware(
                lambda a=axis_num: E.read_pos(a))
            self.settings.get_lq(axis_name + "_target_position").connect_to_hardware(
                write_func = lambda new_pos, a=axis_num: E.write_target_pos_abs(a, new_pos))
            self.settings.get_lq(axis_name + "_enabled").connect_to_hardware(
                read_func = lambda a=axis_num: E.read_enabled(a),
                write_func = lambda enabled, a=axis_num: E.write_enabled(a, enabled))
            self.settings.get_lq(axis_name + "_is_moving").connect_to_hardware(
                read_func = lambda a=axis_num: E.read_is_moving(a))
    def disconnect(self):
        """Detach all settings from hardware and close the serial driver."""
        self.settings.disconnect_all_from_hardware()
        if hasattr(self, 'esp300'):
            self.esp300.close()
            del self.esp300
    def move_step_delta(self, axname, dir=+1):
        "dir should be +/- 1"
        # NOTE(review): dir shadows the builtin, and dir=0 raises
        # ZeroDivisionError here — consider validating the argument.
        dir = dir * 1.0/ abs(dir)
        self.settings[axname + "_target_position"] += dir * self.settings[axname + '_step_delta']
|
[
"esbarnard@lbl.gov"
] |
esbarnard@lbl.gov
|
761694d396861a5c422b785015b5680bb34787ac
|
81f6fd135813f3727576bd5d74acaf0469b53615
|
/test/test_variables_api.py
|
92de66d295e33ba672eaac9f15c6edcb303a029b
|
[] |
no_license
|
rlisowski/phrase-python
|
cb65ded1e80d1985aa95a4403c7aa3f012bd33b4
|
cbd6bf580a74140928b7536bb9b466d43276cc29
|
refs/heads/master
| 2023-06-18T09:24:43.916142
| 2021-07-15T14:21:58
| 2021-07-15T14:21:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,350
|
py
|
# coding: utf-8
"""
Phrase API Reference
The version of the OpenAPI document: 2.0.0
Contact: support@phrase.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import phrase_api
from phrase_api.api.variables_api import VariablesApi # noqa: E501
from phrase_api.rest import ApiException
# OpenAPI-generator test stubs: each case currently passes vacuously.
class TestVariablesApi(unittest.TestCase):
    """VariablesApi unit test stubs"""
    def setUp(self):
        self.api = phrase_api.api.variables_api.VariablesApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_variable_create(self):
        """Test case for variable_create
        Create a variable  # noqa: E501
        """
        # TODO(review): exercise self.api.variable_create against a mock.
        pass
    def test_variable_delete(self):
        """Test case for variable_delete
        Delete a variable  # noqa: E501
        """
        # TODO(review): exercise self.api.variable_delete against a mock.
        pass
    def test_variable_show(self):
        """Test case for variable_show
        Get a single variable  # noqa: E501
        """
        # TODO(review): exercise self.api.variable_show against a mock.
        pass
    def test_variable_update(self):
        """Test case for variable_update
        Update a variable  # noqa: E501
        """
        # TODO(review): exercise self.api.variable_update against a mock.
        pass
    def test_variables_list(self):
        """Test case for variables_list
        List variables  # noqa: E501
        """
        # TODO(review): exercise self.api.variables_list against a mock.
        pass
# Allow running this module's tests directly: python test_variables_api.py
if __name__ == '__main__':
    unittest.main()
|
[
"support@phrase.com"
] |
support@phrase.com
|
673e7667066dc50650cfcf844997ca18b98537de
|
4ba18540bfd8c523fe39bbe7d6c8fa29d4ec0947
|
/atlas/testing/auth_acceptance/config.py
|
8cd7cc13ffa195f811cd91010896c159bec16db3
|
[
"BSD-3-Clause",
"MIT",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause",
"MPL-2.0"
] |
permissive
|
yottabytt/atlas
|
c9d8ef45a0921c9f46d3ed94d42342f11488a85e
|
b040e574fbc64c833039b003f8a90345dd98e0eb
|
refs/heads/master
| 2022-10-14T11:12:12.311137
| 2020-06-13T13:19:35
| 2020-06-13T13:19:35
| 272,008,756
| 0
| 0
|
Apache-2.0
| 2020-06-13T12:55:29
| 2020-06-13T12:55:28
| null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
# A fresh UUID generated once per process; separates test runs.
from uuid import uuid4
TEST_UUID = uuid4()
def set_foundations_home():
import os
os.environ["FOUNDATIONS_HOME"] = os.getcwd() + "/auth_acceptance/foundations_home"
os.environ["FOUNDATIONS_COMMAND_LINE"] = "True"
def _flattened_config_walk():
import os
import os.path as path
for dir_name, _, files in os.walk("auth_acceptance/foundations_home"):
for file_name in files:
if file_name.endswith(".envsubst.yaml"):
yield path.join(dir_name, file_name)
def _load_execution_config():
from foundations_core_cli.typed_config_listing import TypedConfigListing
from foundations_internal.config.execution import translate
TypedConfigListing("execution").update_config_manager_with_config(
"default", translate
)
def _config():
import os
import subprocess
for env_var in ["FOUNDATIONS_HOME"]:
if not os.environ.get(env_var, None):
print(f"{env_var} was not set")
exit(1)
for template_file_name in _flattened_config_walk():
output_file_name = template_file_name[: -len(".envsubst.yaml")] + ".yaml"
subprocess.run(
f"envsubst < {template_file_name} > {output_file_name}", shell=True
)
# _load_execution_config()
def setup_auth_home_config():
set_foundations_home()
_config()
|
[
"mislam@squareup.com"
] |
mislam@squareup.com
|
aa94f2a5beb0b786f90536824232dccead006413
|
53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61
|
/.history/EMR/zhzd_3_20190605095859.py
|
fff25287104a8fd3c22d7bf52709955151761a05
|
[] |
no_license
|
cyc19950621/python
|
4add54894dc81187211aa8d45e5115903b69a182
|
d184b83e73334a37d413306d3694e14a19580cb0
|
refs/heads/master
| 2020-04-11T20:39:34.641303
| 2019-07-02T12:54:49
| 2019-07-02T12:54:49
| 162,078,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
import pandas as pd
# Extract numbered diagnosis lines from each EMR .txt file, normalize them,
# and keep only those that score > 0.8 similarity against an ICD-10 entry.
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHRzhzd')  # collect the .txt files under the EMR directory
dis = open(r'C:\Users\Administrator\Desktop\ICD-10.txt',errors='ignore')
ds=dis.readlines()
ds_cs = []
for line in ds:
    line = re.sub('\n','',line)  # strip newlines from each ICD-10 entry
    ds_cs.append(line)
ryzd=[]  # NOTE(review): never used below — possibly leftover
for emrtxt in emrtxts:
    f = open(emrtxt,'r',errors="ignore")  # ignore decode errors in Chinese text
    emrpath = os.path.basename(emrtxt)
    emrpath = os.path.splitext(emrpath)[0]  # file name without extension, reused for the output file
    pattern =r'\s*\d+、+\s?(.*)'  # numbered list item, e.g. "1、<diagnosis>"
    c=re.compile(pattern)
    output=[]
    for line in f.readlines():
        line1=line.strip('\n')
        line2 = ''.join(line1)
        line2 = line2.strip( )
        line3=c.findall(line2)
        line3=''.join(line3)
        line4 = str(line3)
        out = line4
        # Drop laterality words (left/right/bilateral...) so variants compare equal.
        out= re.sub(r'右侧|两侧|双侧|左侧|右|左|双','',out)
        out = re.sub(r'肺肺','肺',out)  # collapse duplicated "肺" produced by the removal above
        # NOTE(review): if these are ASCII parentheses, '(.*?)' only matches
        # empty strings and this sub is a no-op; it looks intended to strip
        # parenthesized text (perhaps full-width （）) — verify.
        out = re.sub('(.*?)', '', out)
        for ds in ds_cs:
            if EMRdef.SBS(out,ds) > 0.8:  # keep lines similar enough to an ICD-10 entry
                output.append(out)
    output=EMRdef.delre(output)  # presumably de-duplicates the matches — TODO confirm
    output1='\n'.join(output)
    EMRdef.text_create(r'D:\DeepLearning ER\EHRzhzd2','.txt',emrpath,output1)
|
[
"1044801968@qq.com"
] |
1044801968@qq.com
|
f25a62b621331ffbb01cb7d174dcc64601a12e56
|
032a1ad3c94e1126729417a16e2a95743d121244
|
/cell_fitting/optimization/evaluation/plot_sine_stimulus/when_doublet_start.py
|
d9a7d6b7ffa9710e6429e0347cd53e945c59af5e
|
[] |
no_license
|
cafischer/cell_fitting
|
0fd928f5ae59488e12c77648c2e6227c1911d0e9
|
75a81987e1b455f43b5abdc8a9baf6b8f863bee2
|
refs/heads/master
| 2021-01-23T19:27:30.635173
| 2019-09-14T08:46:57
| 2019-09-14T08:46:57
| 44,301,986
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,321
|
py
|
from __future__ import division
import matplotlib.pyplot as pl
from matplotlib.patches import Rectangle
from matplotlib.colors import Normalize
import numpy as np
import os
from nrn_wrapper import Cell
from cell_fitting.optimization.evaluation.plot_sine_stimulus import simulate_sine_stimulus
from cell_characteristics.analyze_APs import get_AP_onset_idxs
from cell_fitting.util import init_nan
from cell_characteristics import to_idx
pl.style.use('paper')
if __name__ == '__main__':
    # Sweep amplitude/frequency of the second sine component, simulate the
    # fitted cell model, and record the first inter-spike interval whenever
    # an early doublet appears; finally plot the ISI grid.
    # parameters
    save_dir = '/home/cf/Phd/programming/projects/cell_fitting/cell_fitting/results/best_models/1'
    model_dir = os.path.join(save_dir, 'cell.json')
    mechanism_dir = '../../../model/channels/vavoulis'
    # load model
    cell = Cell.from_modeldir(model_dir, mechanism_dir)
    # parameters
    AP_threshold = -10  # spike-onset detection threshold passed to get_AP_onset_idxs
    amp1 = 0.6  # amplitude of the slow sine (nA, per the plot axis label)
    sine1_dur = 1000
    onset_dur = 500
    offset_dur = 500
    dt = 0.01  # integration step (ms)
    d_amp = 0.1
    amp2s = np.arange(0.1, 1.0+d_amp, d_amp)
    d_freq = 2
    freq2s = np.arange(3, 15+d_freq, d_freq)
    ISI_first = init_nan((len(amp2s), len(freq2s)))  # NaN where no doublet was found
    save_dir_img = os.path.join(save_dir, 'img', 'sine_stimulus', 'when_doublet', 'start',
                                'amp1_'+str(amp1) + '_dur1_'+str(sine1_dur))
    if not os.path.exists(save_dir_img):
        os.makedirs(save_dir_img)
    for i, amp2 in enumerate(amp2s):
        for j, freq2 in enumerate(freq2s):
            v, t, _ = simulate_sine_stimulus(cell, amp1, amp2, sine1_dur, freq2, onset_dur, offset_dur, dt)
            onsets = get_AP_onset_idxs(v, AP_threshold)  # use only period in the middle
            if len(onsets) >= 2:
                # Doublet criterion: first ISI (ms) shorter than half the fast
                # sine's period; true division is guaranteed by the
                # __future__ import at the top of the module.
                if (onsets[1] - onsets[0]) * dt < 1/2 * 1/freq2 * 1000:
                    ISI_first[i, j] = (onsets[1] - onsets[0]) * dt
                    print ISI_first[i, j]
            pl.figure(figsize=(18, 8))
            pl.plot(t, v, 'k', linewidth=1.0)
            pl.xlabel('Time (ms)')
            pl.ylabel('Membrane Potential (mV)')
            pl.ylim(-95, 55)
            pl.tight_layout()
            pl.savefig(os.path.join(save_dir_img, 'v_'+str(amp2)+'_'+str(freq2)+'.png'))
            #pl.show()
    # plot
    cmap = pl.get_cmap('viridis')
    ISI_max = 15  # ISIs above this (ms) are drawn as red outlier cells
    norm = Normalize(vmin=0, vmax=ISI_max)
    fig, ax = pl.subplots()
    for i, amp2 in enumerate(amp2s):
        for j, freq2 in enumerate(freq2s):
            if not np.isnan(ISI_first[i, j]):
                if ISI_first[i, j] > ISI_max:
                    w = d_amp / 2
                    h = d_freq / 6
                    ax.add_patch(Rectangle((amp2 - w / 2, freq2 - h / 2), w, h, color='r'))
                else:
                    c = cmap(norm(ISI_first[i, j]))
                    w = d_amp/2
                    h = d_freq/6
                    ax.add_patch(Rectangle((amp2-w/2, freq2-h/2), w, h, color=c))
    pl.xlim(amp2s[0]-d_amp/2, amp2s[-1]+d_amp/2)
    pl.ylim(freq2s[0]-d_freq/2, freq2s[-1]+d_freq/2)
    pl.xlabel('Amplitude (nA)')
    pl.ylabel('Frequency (Hz)')
    sm = pl.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array(np.array([0, ISI_max]))
    cb = pl.colorbar(sm)
    cb.ax.get_yaxis().labelpad = 20
    cb.ax.set_ylabel('$ ISI_{2nd-1st}$', rotation=270)
    pl.tight_layout()
    pl.savefig(os.path.join(save_dir_img, 'ISI.png'))
    #pl.show()
|
[
"coralinefischer@gmail.com"
] |
coralinefischer@gmail.com
|
df74c510b2fa1f4bec7ac08c8ae445e9eb2ce365
|
f259ca399ab33b5c2e66ae07921711ea5917ac9e
|
/pytorch/sphere20a.py
|
d4ce73637194c4236b20b4eb2bb1a4d6717c6d89
|
[] |
no_license
|
jizhuoran/HyperTea_Maker
|
9a7930e1d6af995c8fdb9a15354eea5fc29f0806
|
2c3f8dfcb699495093165cd986eebedfb17a2433
|
refs/heads/master
| 2020-04-22T19:32:39.385611
| 2019-04-14T15:12:06
| 2019-04-14T15:12:48
| 170,610,900
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,046
|
py
|
import torch.nn as nn
import torch
class sphere20a(nn.Module):
def __init__(self,classnum=10574,feature=False):
super(sphere20a, self).__init__()
self.classnum = classnum
self.feature = feature
#input = B*3*112*96
self.conv1_1 = nn.Conv2d(3,64,3,2,1) #=>B*64*56*48
self.relu1_1 = nn.PReLU(64)
self.conv1_2 = nn.Conv2d(64,64,3,1,1)
self.relu1_2 = nn.PReLU(64)
self.conv1_3 = nn.Conv2d(64,64,3,1,1)
self.relu1_3 = nn.PReLU(64)
self.conv2_1 = nn.Conv2d(64,128,3,2,1) #=>B*128*28*24
self.relu2_1 = nn.PReLU(128)
self.conv2_2 = nn.Conv2d(128,128,3,1,1)
self.relu2_2 = nn.PReLU(128)
self.conv2_3 = nn.Conv2d(128,128,3,1,1)
self.relu2_3 = nn.PReLU(128)
self.conv2_4 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_4 = nn.PReLU(128)
self.conv2_5 = nn.Conv2d(128,128,3,1,1)
self.relu2_5 = nn.PReLU(128)
self.conv3_1 = nn.Conv2d(128,256,3,2,1) #=>B*256*14*12
self.relu3_1 = nn.PReLU(256)
self.conv3_2 = nn.Conv2d(256,256,3,1,1)
self.relu3_2 = nn.PReLU(256)
self.conv3_3 = nn.Conv2d(256,256,3,1,1)
self.relu3_3 = nn.PReLU(256)
self.conv3_4 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_4 = nn.PReLU(256)
self.conv3_5 = nn.Conv2d(256,256,3,1,1)
self.relu3_5 = nn.PReLU(256)
self.conv3_6 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_6 = nn.PReLU(256)
self.conv3_7 = nn.Conv2d(256,256,3,1,1)
self.relu3_7 = nn.PReLU(256)
self.conv3_8 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_8 = nn.PReLU(256)
self.conv3_9 = nn.Conv2d(256,256,3,1,1)
self.relu3_9 = nn.PReLU(256)
self.conv4_1 = nn.Conv2d(256,512,3,2,1) #=>B*512*7*6
self.relu4_1 = nn.PReLU(512)
self.conv4_2 = nn.Conv2d(512,512,3,1,1)
self.relu4_2 = nn.PReLU(512)
self.conv4_3 = nn.Conv2d(512,512,3,1,1)
self.relu4_3 = nn.PReLU(512)
self.fc5 = nn.Linear(512*7*6,512)
self.fc6 = nn.Linear(512,self.classnum)
def forward(self, x):
x = self.relu1_1(self.conv1_1(x))
x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))
x = self.relu2_1(self.conv2_1(x))
x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x))))
x = self.relu3_1(self.conv3_1(x))
x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x))))
x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x))))
x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x))))
x = self.relu4_1(self.conv4_1(x))
x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))
x = x.view(x.size(0),-1)
x = self.fc5(x)
x = self.fc6(x)
return x
|
[
"jizr@connect.hku.hk"
] |
jizr@connect.hku.hk
|
42978fcaa46628548561391c85f29c13b5e7dd6d
|
44600adf1731a449ff2dd5c84ce92c7f8b567fa4
|
/colour_down/examples/plotting/examples_volume_plots.py
|
769af73894ba737a07e58c6c32c7848950048d7f
|
[] |
no_license
|
ajun73/Work_Code
|
b6a3581c5be4ccde93bd4632d8aaaa9ecc782b43
|
017d12361f7f9419d4b45b23ed81f9856278e849
|
refs/heads/master
| 2020-04-11T23:16:43.994397
| 2019-12-28T07:48:44
| 2019-12-28T07:48:44
| 162,161,852
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
# -*- coding: utf-8 -*-
"""
Showcases colour models volume and gamut plotting examples.
"""
import numpy as np
from colour.plotting import (RGB_colourspaces_gamuts_plot, RGB_scatter_plot,
                             colour_plotting_defaults)
from colour.utilities import message_box
message_box('Colour Models Volume and Gamut Plots')
colour_plotting_defaults()
message_box(('Plotting "ITU-R BT.709" RGB colourspace volume in "CIE xyY" '
             'colourspace.'))
RGB_colourspaces_gamuts_plot(
    ('ITU-R BT.709', ), reference_colourspace='CIE xyY')
print('\n')
message_box(('Comparing "ITU-R BT.709" and "ACEScg" RGB colourspaces volume '
             'in "CIE L*a*b*" colourspace.'))
# The second colourspace is rendered as a translucent grey overlay
# (face/edge colour 0.25 grey with low alpha) so both gamuts stay visible.
RGB_colourspaces_gamuts_plot(
    ('ITU-R BT.709', 'ACEScg'),
    reference_colourspace='CIE Lab',
    style={
        'face_colours': (None, (0.25, 0.25, 0.25)),
        'edge_colours': (None, (0.25, 0.25, 0.25)),
        'edge_alpha': (1.0, 0.1),
        'face_alpha': (1.0, 0.0)
    })
print('\n')
message_box(('Plotting "ACEScg" colourspaces values in "CIE L*a*b*" '
             'colourspace.'))
# Random RGB samples scattered inside the two gamut wireframes.
RGB = np.random.random((32, 32, 3))
RGB_scatter_plot(
    RGB,
    'ACEScg',
    reference_colourspace='CIE Lab',
    colourspaces=('ACEScg', 'ITU-R BT.709'),
    face_colours=((0.25, 0.25, 0.25), None),
    edge_colours=((0.25, 0.25, 0.25), None),
    edge_alpha=(0.1, 0.5),
    face_alpha=(0.1, 0.5),
    grid_face_colours=(0.1, 0.1, 0.1),
    grid_edge_colours=(0.1, 0.1, 0.1),
    grid_edge_alpha=0.5,
    grid_face_alpha=0.1)
|
[
"ajun73@gmail.com"
] |
ajun73@gmail.com
|
ead60febeb04e387de8528926f63dddb77c1025d
|
d27b030ce654d523b266821080acb246d71a85af
|
/PDB/clrender.py
|
b5283cb280bcb2552552a4dac8d1945ddc356746
|
[] |
no_license
|
amiller/graphicsii
|
9b6d638591a8df3267865a1be83cb1591586f662
|
da6cc6347d2b1f344056b71358a4b5b8efabdb77
|
refs/heads/master
| 2016-09-03T06:23:42.297039
| 2011-05-02T02:39:15
| 2011-05-02T02:39:15
| 1,689,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,923
|
py
|
import pyglet.gl
from OpenGL.GL import *
from OpenGL.GLU import *
from molecule import Molecule
import pyopencl as cl
import numpy as np
def print_info(obj, info_cls):
for info_name in sorted(dir(info_cls)):
if not info_name.startswith("_") and info_name != "to_string":
info = getattr(info_cls, info_name)
try:
info_value = obj.get_info(info)
except:
info_value = "<error>"
print "%s: %s" % (info_name, info_value)
# Module-level OpenCL setup: first platform/device, one shared context and a
# profiling-enabled command queue used by CLRender below.
platform = cl.get_platforms()[0]
device = platform.get_devices()[0]
context = cl.Context([device])
print_info(context.devices[0], cl.device_info)
queue = cl.CommandQueue(context,
        properties = cl.command_queue_properties.PROFILING_ENABLE)
mf = cl.mem_flags
N = 512  # output image is N x N RGBA
class CLRender(object):
    """Traces a molecule with an OpenCL kernel and shows it as a GL texture."""
    # Class-level defaults.  NOTE(review): `angles` is a mutable class
    # attribute, shared across instances until an instance rebinds it.
    angles = [0,0,0]
    scale = 1
    mol = None
    env_buf = None
    def __init__(self):
        # Host-side image and matching device buffer for the N x N RGBA output.
        self.dst = np.empty((N,N,4)).astype(np.uint8)
        self.dst_buf = cl.Buffer(context, mf.WRITE_ONLY, self.dst.nbytes)
        # Device buffers for the 4x4 float32 model-view matrix and its inverse.
        self.inv_matrix = cl.Buffer(context, mf.READ_ONLY, 16 * 4)
        self.matrix = cl.Buffer(context, mf.READ_ONLY, 16 * 4)
        with open('kernel.cl','r') as f:
            self.program = cl.Program(context, f.read()).build("-cl-mad-enable")
        print self.program.get_build_info(context.devices[0], cl.program_build_info.LOG)
        # GL texture that compute() uploads the traced image into.
        self.dstTex = glGenTextures(1);
        glBindTexture(GL_TEXTURE_2D, self.dstTex);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, N, N, 0, GL_RGBA, GL_UNSIGNED_BYTE, None);
        glBindTexture(GL_TEXTURE_2D, 0);
        print_info(self.program, cl.program_info)
        print_info(self.program.pdbTracer, cl.kernel_info)
        # 256x256 direction map: rad = sqrt(u), phi = 2*pi*v yields unit
        # vectors — presumably hemisphere samples for the kernel's lighting;
        # verify against kernel.cl.
        grid = np.array(range(256),dtype=np.float32)/256
        x1,x2 = np.meshgrid(grid, grid)
        rad = np.sqrt(x1)
        phi = 2*np.pi * x2
        phimap = np.dstack((np.cos(phi)*rad, np.sin(phi)*rad, np.sqrt(1-rad*rad), 0*rad))
        self.p = phimap
        fmt = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
        self.phimap = cl.Image(context, mf.READ_ONLY | mf.COPY_HOST_PTR, fmt,
                shape=phimap.shape[:2], hostbuf=np.array(phimap, order='C'))
    def applySceneTransforms(self):
        # Camera placed back along +Z by twice the molecule radius, then the
        # user scale and rotation, then recenter the molecule at the origin.
        gluLookAt(0, 0, 2*self.mol.radius, 0, 0, 0, 0, 1, 0); # Push molecule away from the origin along -Z direction.
        glScalef(self.scale,self.scale,self.scale);
        def mouse_rotate(xAngle, yAngle, zAngle):
            glRotatef(xAngle, 1.0, 0.0, 0.0);
            glRotatef(yAngle, 0.0, 1.0, 0.0);
            glRotatef(zAngle, 0.0, 0.0, 1.0);
        mouse_rotate(self.angles[0],self.angles[1],self.angles[2]);
        glTranslatef(-self.mol.x, -self.mol.y, -self.mol.z); # Bring molecue center to origin
    def render(self):
        # Draw the traced texture on a full-screen quad.
        glBindTexture(GL_TEXTURE_2D, self.dstTex)
        glEnable(GL_TEXTURE_2D)
        glBegin(GL_QUADS)
        glTexCoord2f( 0.0, 0.0 ); glVertex3f( -1.0, -1.0, -1.0 )
        glTexCoord2f( 0.0, 1.0 ); glVertex3f( -1.0,  1.0, -1.0 )
        glTexCoord2f( 1.0, 1.0 ); glVertex3f(  1.0,  1.0, -1.0 )
        glTexCoord2f( 1.0, 0.0 ); glVertex3f(  1.0, -1.0, -1.0 )
        glEnd()
        glDisable(GL_TEXTURE_2D)
    def compute(self):
        # Capture the scene's model-view matrix without disturbing GL state.
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()
        self.applySceneTransforms()
        mat = np.array(glGetFloat(GL_MODELVIEW_MATRIX).transpose(), order='C')
        glPopMatrix()
        inv = np.array(np.linalg.inv(mat), order='C')
        # Upload matrices, launch the tracer, then read the image back.
        e1 = cl.enqueue_write_buffer(queue, self.matrix, mat)
        e2 = cl.enqueue_write_buffer(queue, self.inv_matrix, inv)
        e3 = self.program.pdbTracer(queue, self.dst.shape[:2], self.dst_buf,
                self.matrix, self.inv_matrix,
                np.array(len(self.mol.spheres)), self.spheredata,
                self.envmap, self.phimap, self.sampler)
        e4 = cl.enqueue_read_buffer(queue, self.dst_buf, self.dst)
        queue.finish()
        e4.wait()
        # Report the kernel's device execution time in seconds (profiling queue).
        for e in [e3]:
            print (e.profile.END - e.profile.START)*1e-9
        glBindTexture(GL_TEXTURE_2D, self.dstTex)
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, N, N, GL_RGBA, GL_UNSIGNED_BYTE, self.dst)
    def set_envmap(self, envmap):
        # Pad the RGB environment map to RGBA (alpha=1) and upload as an image.
        fmt = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
        em = np.zeros(envmap.shape[:2] + (4,), dtype=np.float32)
        em[:,:,:3] = envmap; em[:,:,3] = 1;
        self.envmap = cl.Image(context, mf.READ_ONLY | mf.COPY_HOST_PTR, fmt,
            shape=em.shape[:2], hostbuf=em)
        self.sampler = cl.Sampler(context, True, cl.addressing_mode.CLAMP, cl.filter_mode.LINEAR)
    def set_molecule(self, mol):
        # Keep the molecule and upload its sphere data for the kernel.
        self.mol = mol
        self.spheredata = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
            hostbuf = self.mol.spheredata)
    def load_molecule(self, filename):
        # Convenience wrapper: parse a PDB file and install it.
        self.set_molecule(Molecule(filename))
if __name__ == "__main__":
    from pfmloader import load_pfm
    # Smoke test: trace sucrose under the St. Peters light probe once.
    r = CLRender()
    r.set_molecule(Molecule('data/sugars/sucrose.pdb'))
    r.set_envmap(load_pfm('data/probes/stpeters_probe.pfm'))
    r.compute()
|
[
"amiller@dappervision.com"
] |
amiller@dappervision.com
|
21e1b0da1f6e231a3370a401206faebd2f2aff3e
|
c351c54ff292d4ce8628cf033f8f3026829d79f3
|
/blog_api/apis/authorization_layer/python/bin/pyrsa-keygen
|
126ebac139a75ad6bfe8c1f5d7e0f83016d8882e
|
[] |
no_license
|
MathiasDarr/Portfolio
|
424ba0d3bd3b36bb9be09a31ea0b9bca2d3cc568
|
0eb6377d9aedba75ac30a0a5583f47dc31d31810
|
refs/heads/master
| 2023-02-06T04:33:44.123544
| 2020-12-31T08:35:45
| 2020-12-31T08:35:45
| 261,949,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
#!/home/mddarr/data/anaconda3/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated by setuptools/pip for the `rsa` package:
# normalizes argv[0], then dispatches to rsa.cli.keygen.
import re
import sys
from rsa.cli import keygen
if __name__ == '__main__':
    # Strip "-script.pyw"/".exe" suffixes so the tool reports a clean name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(keygen())
|
[
"mddarr@gmail.com"
] |
mddarr@gmail.com
|
|
7ecaa4450f543c9a68460f1cc3e01872c9cb707f
|
09ee86d0bd77ca79992f073b6c8b1e98b88cb09b
|
/resource_allocation.py
|
df8c166cbc456843a049ef8501f85c59300fe21a
|
[] |
no_license
|
JaneWuNEU/hitdl_server
|
624fbb5cfea3641cb624a291ed6de1e274982463
|
9076a813c803bc9c47054fff7bae2824304da282
|
refs/heads/master
| 2022-12-29T22:14:44.496492
| 2020-10-13T01:58:19
| 2020-10-13T01:58:19
| 303,327,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,589
|
py
|
import cvxpy as cp
import numpy as np
c1 = np.array([1,3,5])
e1 = np.array([1.505,1.351,1.27])*c1
c2 = np.array([2,5,7])
e2 = np.array([1.844,1.502,1.843])*c2
c3 = np.array([1,5])
e3 = np.array([1.505,1.148])*c3
C = 12
'''
x1 = cp.Variable(name="inception",shape=(len(c1),1),integer=True,pos=True)
y1 = cp.Variable(shape=(len(c1),1),integer=True,pos=True)
x2 = cp.Variable(name="mobilenet",shape=(len(c2),1),integer=True,pos=True)
y2 = cp.Variable(shape=(len(c2),1),integer=True,pos=True)
x3= cp.Variable(name="resnet",shape=(len(c3),1),integer=True,pos=True)
y3 = cp.Variable(shape=(len(c3),1),integer=True,pos=True)
x1 = cp.Variable(name="inception",shape=(len(c1),1),pos=True)
y1 = cp.Variable(shape=(len(c1),1),pos=True)
x2 = cp.Variable(name="mobilenet",shape=(len(c2),1),pos=True)
y2 = cp.Variable(shape=(len(c2),1),pos=True)
x3= cp.Variable(name="resnet",shape=(len(c3),1),pos=True)
y3 = cp.Variable(shape=(len(c3),1),pos=True)
exp1 = e1@cp.multiply(x1-np.ones((len(c1),1)),y1-np.ones((len(c1),1)))
exp2 = e2@cp.multiply(x2-np.ones((len(c2),1)),y2-np.ones((len(c2),1)))
exp3 = e3@cp.multiply(x3-np.ones((len(c3),1)),y3-np.ones((len(c3),1)))
exp4 = c1@cp.multiply(x1-np.ones((len(c1),1)),y1-np.ones((len(c1),1)))
exp5 = c2@cp.multiply(x2-np.ones((len(c2),1)),y2-np.ones((len(c2),1)))
exp6 = c3@cp.multiply(x3-np.ones((len(c3),1)),y3-np.ones((len(c3),1)))
obj = exp1+exp2+exp3
print(obj.shape)
cores_cons = exp4+exp5+exp6
prob1 = cp.Problem(cp.Maximize(obj),
[cp.sum(y1-np.ones((len(c1),1)))==1,
cp.sum(y2-np.ones((len(c2),1)))==1,
cp.sum(y3-np.ones((len(c3),1)))==1,
cores_cons <= C])
result = prob1.solve(gp=True)
'''
class MCKPAllocation:
    """Multiple-choice-knapsack-style CPU-core allocation across model instances.

    CPU_Cores -- total cores available (the knapsack capacity).
    F         -- per-model cap expressed as a fraction of the total cores.
    """
    def __init__(self, CPU_Cores, F):
        self.CPU_Cores = CPU_Cores
        self.F = F
        # BUG FIX: was round(C*F), which silently used the module-level
        # constant C instead of the constructor argument.
        self.C_upper = round(CPU_Cores * F)

    def cpu_const(self):
        """Assemble per-plan vectors for the allocation program.

        Returns a tuple of
          overall_cons  -- cores consumed by each deployment plan,
          model_cons    -- per-model masked copies of overall_cons (3 vectors),
          overall_E     -- efficiency of each plan,
          ins_num_upper -- max instance count per plan, shape (plans, 1),
          ins_num_lower -- per-model indicator vectors (>= 1 instance each).
        """
        # NOTE(review): these efficiency numbers differ from the module-level
        # e1/e2/e3 arrays — confirm which measurement set is current.
        ins_size = {"inception":{"intra":[1,3,5],"efficiency":[2.605,1.351,1.27]},
                    "resnet":{"intra":[2,5,7],"efficiency":[1.844,1.502,1.843]},
                    "mobilenet":{"intra":[1,5],"efficiency":[1.505,1.148]}}
        total_plans = (len(ins_size["inception"]["intra"])
                       + len(ins_size["resnet"]["intra"])
                       + len(ins_size["mobilenet"]["intra"]))
        overall_cons = np.zeros(total_plans)
        overall_E = np.zeros(total_plans)
        model_cons = [np.zeros(total_plans), np.zeros(total_plans), np.zeros(total_plans)]
        # Offset of each model's plans inside the flat plan vectors.
        cons_start = {"inception": 0,
                      "resnet": len(ins_size["inception"]["intra"]),
                      "mobilenet": len(ins_size["resnet"]["intra"]) + len(ins_size["inception"]["intra"])}
        ins_num_upper = np.zeros(total_plans)
        ins_num_lower = [np.zeros(total_plans), np.zeros(total_plans), np.zeros(total_plans)]
        for i, model_name in enumerate(["inception", "resnet", "mobilenet"]):
            start = cons_start[model_name]
            end = start + len(ins_size[model_name]["intra"])
            model_cons[i][start:end] = ins_size[model_name]["intra"]
            overall_cons[start:end] = ins_size[model_name]["intra"]
            overall_E[start:end] = ins_size[model_name]["efficiency"]
            # BUG FIX: was the bare name C_upper (NameError at call time);
            # use the per-model cap computed in __init__.
            ins_num_upper[start:end] = np.floor(self.C_upper / np.array(ins_size[model_name]["intra"]))
            ins_num_lower[i][start:end] = np.ones(len(ins_size[model_name]["intra"]))
        return overall_cons, model_cons, overall_E, ins_num_upper.reshape((total_plans, 1)), ins_num_lower

    def resource_allocation(self):
        """Solve the integer program and print the optimal plan counts."""
        overall_cons, model_cons, overall_E, ins_num_upper, ins_num_lower = self.cpu_const()
        # Z[p] = number of instances deployed with plan p.
        Z = cp.Variable((len(overall_cons), 1), integer=True)
        obj = cp.Maximize(overall_E @ Z)
        # BUG FIX: constraints previously referenced the bare names C and
        # C_upper; a duplicated `Z <= ins_num_upper` constraint was removed.
        prob = cp.Problem(obj, [overall_cons @ Z <= self.CPU_Cores,
                                model_cons[0] @ Z <= self.C_upper,
                                model_cons[1] @ Z <= self.C_upper,
                                model_cons[2] @ Z <= self.C_upper,
                                ins_num_lower[0] @ Z >= 1,
                                ins_num_lower[1] @ Z >= 1,
                                ins_num_lower[2] @ Z >= 1,
                                Z <= ins_num_upper,
                                Z >= np.zeros(shape=(len(overall_cons), 1))])
        print(prob.solve(), prob.status)
        print(Z.value)
if __name__ == "__main__":
    # BUG FIX: the original bare call `resource_allocation()` raised
    # NameError — it is a method of MCKPAllocation, not a module function.
    # F=1.0 lets a single model use every core; NOTE(review): confirm the
    # intended per-model fraction.
    MCKPAllocation(C, 1.0).resource_allocation()
|
[
"neu_15_wujing@126.com"
] |
neu_15_wujing@126.com
|
bef5e7923ef0e16ee3bfb5807262adf9b9c54494
|
159f1032e3da50f15718e2ca99f6a3e50642b4b0
|
/disquaire_project/disquaire_project/settings.py
|
b5ca2ae7e481309473f430f6948a4b80df16b46c
|
[] |
no_license
|
Ellobo1er/disquaire_project
|
a3b29372dfe95f9938cd84723633f0ef3120ab3e
|
0af1d93b2f8aa6302cb6ecb0b2d5b3bd7ddcb2ef
|
refs/heads/master
| 2023-06-28T14:23:54.285800
| 2021-07-29T16:01:23
| 2021-07-29T16:01:23
| 390,767,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,373
|
py
|
"""
Django settings for disquaire_project project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$4fpf$=&&zwkr1qty!b1gu)57_y+_kvvygn5@bz698or1jqa&s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'store.apps.StoreConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
]
# ...
INTERNAL_IPS = ['127.0.0.1']
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'disquaire_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'disquaire_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'disquaire',
'USER': 'postgres',
'PASSWORD': 'admin',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'fr'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
INTERNAL_IPS = ['127.0.0.1']
|
[
"you@example.com"
] |
you@example.com
|
6618838361e332c0f1e2a1d03010d913848c0609
|
4144df22392350035a9a24fcbc23fd1c6bce5c12
|
/Lib/glyphNameFormatter/rangeProcessors/ipa_extensions.py
|
ccf36e61c84dd3c9b8071eb6a3377ab9c632c3e7
|
[
"BSD-3-Clause",
"Adobe-Glyph"
] |
permissive
|
danielgrumer/glyphNameFormatter
|
55b6076684bed7ff4cc6e37ce4a0bb0e2ce86a4a
|
9a41b3ef02c01cd18afe0232f6e436a2f7379178
|
refs/heads/master
| 2020-12-11T05:35:47.835908
| 2016-03-19T09:50:33
| 2016-03-19T09:50:33
| 53,578,090
| 0
| 0
| null | 2016-03-10T11:07:31
| 2016-03-10T11:07:30
| null |
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
def process(self):
    """Build glyph names for the IPA Extensions Unicode range.

    Rewrites the raw Unicode description through a sequence of edit()/
    replace() calls.  Order is significant: multi-word phrases such as
    "LETTER CLICK" are handled before the generic "LETTER" removal at the
    end, so earlier calls must not consume their substrings first.
    """
    self.edit("LATIN")
    self.edit("OPEN", "open")
    self.edit("WITH FISHHOOK", "fishhook")
    self.edit("SCRIPT", "script")
    self.edit("WITH BELT", "belt")
    self.edit("WITH MIDDLE TILDE", "middletilde")
    self.edit("WITH LONG LEG", "longleg")
    self.edit("WITH CROSSED-TAIL", "crossedtail")
    self.edit("BILABIAL", "bilabial")
    self.edit("BIDENTAL", "bidental")
    self.edit("STRETCHED", "stretched")
    self.edit("WITH STROKE", "stroke")
    self.edit("SQUAT", "squat")
    self.edit("INVERTED", "inverted")
    self.edit("REVERSED", "reversed")
    self.replace("DZ", "dzed")
    self.replace("LZ", "lzed")
    self.replace("DIGRAPH")
    self.replace("PERCUSSIVE", "percussive")
    self.replace("GLOTTAL", "glottal")
    self.replace("STOP", "stop")
    self.replace("PHARYNGEAL", "pharyngeal")
    self.replace("VOICED", "voiced")
    self.replace("FRICATIVE", "fricative")
    self.replace("LETTER CLICK", "click")
    self.replace("LETTER GLOTTAL STOP WITH STROKE", "glottalstopstroke")
    self.replace("LETTER SMALL CAPITAL OE", "OEsmall")
    # Shared post-processing (base-class helpers): diacritics, shape terms,
    # casing, generic "LETTER" removal, then name compression.
    self.processDiacritics()
    self.processShape()
    self.handleCase()
    self.replace("LETTER")
    self.compress()
if __name__ == "__main__":
    # Manual check: print the generated names for the whole range.
    from glyphNameFormatter.test import printRange
    printRange("IPA Extensions")
|
[
"frederik@typemytype.com"
] |
frederik@typemytype.com
|
e6fae05c449f2092d5fda416fb23b95be3b3aa1f
|
73105a000374f7bbe97dac50b91b0c019826a1ba
|
/account/pipelines.py
|
8fb36edc4bd7c7340a1ddea7f7606a19b22a27d7
|
[
"MIT"
] |
permissive
|
carpedm20/UNIST-auction
|
657e80840e4c6adbfaeebd118acc03d4e04cc2a5
|
f2db1d6fdb2c7781b3c142f8a2582888e24ad06d
|
refs/heads/master
| 2021-01-22T04:43:55.844905
| 2014-10-21T14:01:32
| 2014-10-21T14:01:32
| 22,419,149
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
from social_auth.backends.facebook import FacebookBackend
from social_auth.backends.twitter import TwitterBackend
from social_auth.backends import google
from social_auth.signals import socialauth_registered
def get_user_avatar(backend, details, response, social_user, uid,
                    user, *args, **kwargs):
    """social-auth pipeline step: store a profile image URL on the user.

    Facebook users get the Graph API picture, Twitter users their
    full-size avatar; every other backend falls back to a blank Gravatar.
    """
    backend_cls = backend.__class__
    if backend_cls == FacebookBackend:
        url = "http://graph.facebook.com/%s/picture?type=large" % response['id']
    elif backend_cls == TwitterBackend:
        # Drop the "_normal" suffix to get the original-size image.
        url = response.get('profile_image_url', '').replace('_normal', '')
    else:
        url = 'http://www.gravatar.com/avatar/00000000000000000000000000000000'
    if url:
        user.profile_image_url = url
        user.save()
|
[
"carpedm20@gmail.com"
] |
carpedm20@gmail.com
|
6237d5cd45456cf4aea5e5eaa2cd7525a5a0f984
|
22bf910b64283b3c15cc4d80542e83fa89e9f09d
|
/monero_glue/messages/DebugLinkShowTextStyle.py
|
6ddc6028fdf6a547536fc717cd8d48b7bf7a8654
|
[
"MIT"
] |
permissive
|
ph4r05/monero-agent
|
24ed1aa17d6616b2ae6bcdb7b9997f982f8b7b5d
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
refs/heads/master
| 2022-10-18T06:30:43.550133
| 2021-07-01T16:27:56
| 2021-07-01T16:27:56
| 126,215,119
| 24
| 5
|
MIT
| 2022-09-23T22:53:44
| 2018-03-21T17:18:21
|
Python
|
UTF-8
|
Python
| false
| false
| 315
|
py
|
# Automatically generated by pb2py
# fmt: off
# Text style constants generated from a protobuf enum (per the pb2py header).
if False:
    # Typing-only import: `if False` keeps typing_extensions off the runtime
    # import path while still informing type checkers about the comments below.
    from typing_extensions import Literal
NORMAL = 0  # type: Literal[0]
BOLD = 1  # type: Literal[1]
MONO = 2  # type: Literal[2]
MONO_BOLD = 3  # type: Literal[3]
BR = 4  # type: Literal[4]
BR_HALF = 5  # type: Literal[5]
SET_COLOR = 6  # type: Literal[6]
|
[
"dusan.klinec@gmail.com"
] |
dusan.klinec@gmail.com
|
759b0b137a7faf1da9dc6ffbab58053fdcbad295
|
bb5465b31067d8e2ef20a93c87bfad2c6a8e6569
|
/orders/forms.py
|
c30ac21ad1c2d2d3346d20718321be245f6af33b
|
[] |
no_license
|
greypanda/Django-Bootcamp-1
|
cc7e1b131b55be4ca224702397f0e4aee6e1d2d9
|
d66886bd2ab65f07cba08dc26640f52e0da72ac4
|
refs/heads/main
| 2022-12-27T01:27:26.516712
| 2020-10-14T23:45:57
| 2020-10-14T23:45:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
from django import forms
from .models import Order
class OrderForm(forms.ModelForm):
    """Order form that rejects submission when the product is out of stock."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: was kwargs.pop("product") or None, which raises KeyError
        # when "product" is not passed at all — the `or None` shows a missing
        # product was meant to be tolerated; use pop's default instead.
        product = kwargs.pop("product", None)
        super().__init__(*args, **kwargs)
        self.product = product

    class Meta:
        model = Order
        fields = [
            'shipping_address',
            'billing_address',
        ]

    def clean(self, *args, **kwargs):
        """Validate the form; raise ValidationError if out of inventory."""
        cleaned_data = super().clean(*args, **kwargs)
        # check product inventory
        if self.product is not None:
            if not self.product.has_inventory():
                raise forms.ValidationError("This product is out of inventory.")
        return cleaned_data
|
[
"hello@teamcfe.com"
] |
hello@teamcfe.com
|
3a05d1a9e15233697f2611e6105e3a61f8da2282
|
b0ede55e98d454f558e5397369f9265893deedb5
|
/SWEA/D3/3750_digit_sum.py
|
91b7ee6c6d6baa5d0dc8631446283554e277d0fb
|
[] |
no_license
|
YeonggilGo/python_practice
|
5ff65852900c4c6769d541af16f74a27a67920ec
|
43082568b5045a8efc1d596074bdca3e66b2fed1
|
refs/heads/master
| 2023-06-22T02:09:31.906745
| 2023-06-17T01:27:22
| 2023-06-17T01:27:22
| 280,361,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# (Translated author note:) Instead of printing per test case, storing the
# answers and printing them all at the end reduced the measured runtime a
# lot; the reason was not clear to the author yet.
T = int(input())  # number of test cases
ans = []
for tc in range(1, T + 1):
    N = input()  # the number, kept as a string so its digits can be iterated
    while len(N) > 1:
        # Repeatedly replace N with the sum of its digits until a single
        # digit remains (the digital root).
        N_li = list(map(int, N))
        N = str(sum(N_li))
    ans.append(N)
for tc in range(0, T):
    print(f'#{tc+1} {ans[tc]}')
|
[
"dudrlf1859@naver.com"
] |
dudrlf1859@naver.com
|
25e7860fa269e96b48ce74d7908cadb94fc03315
|
0ddbbc997883aa7c17e50a08de7aa40c3a4955c7
|
/project1/package1/plot_test.py
|
ac3581147ec93402bf0aa6e75ea365f5c588c3e6
|
[] |
no_license
|
kwoolter/vscode-online
|
39eef2ab9c13c0460d6f8a45a8674906e7594bdd
|
f13c0a1378a2724a44d95ce4ab06700eb0642cae
|
refs/heads/master
| 2022-07-14T01:44:28.495267
| 2020-05-16T10:21:42
| 2020-05-16T10:21:42
| 264,375,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
import matplotlib.pyplot as plt
import numpy as np
# Minimal matplotlib smoke test: draw a sine curve and keep the window open
# until the user presses ENTER.
x = np.linspace(0, 20, 100)
plt.plot(x, np.sin(x))
plt.show(block=False)  # non-blocking so the input() prompt below is reachable
input('press <ENTER> to continue')
|
[
"keith.woolterton@gmail.com"
] |
keith.woolterton@gmail.com
|
70cd3506623f02e09d026e8fbf4721df8d98cd99
|
8cb8bfd2dae516612251039e0632173ea1ea4c8a
|
/modules/user/publisher_user.py
|
946ebcc7023cc69b7181138f470652fe3331ebb9
|
[] |
no_license
|
nyzsirt/lift-prod
|
563cc70700d26a5812a1bce0bd9795998dce6e99
|
9a5f28e49ad5e80e422a5d5efee77a2d0247aa2b
|
refs/heads/master
| 2020-04-22T01:05:42.262876
| 2019-02-09T13:31:15
| 2019-02-09T13:31:15
| 170,003,361
| 1
| 0
| null | 2019-02-10T17:11:50
| 2019-02-10T17:11:50
| null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
class PublisherUserOnCreate:
"""
Publisher for notifying subscribers on new service admin user creating
"""
def __init__(self):
self.__new_user=None
self.__subscribers=[]
@property
def new_user(self):
"""
Publisher notifies subscribers about this user dict
:return: new user dict
"""
return self.__new_user
@new_user.setter
def new_user(self,user):
"""
Set new_user dict and if not None notify all subscribers
:param user: user dict
:return: Void
"""
self.__new_user=user
if self.__new_user:
self.notify_subscribers()
def notify_subscribers(self):
for subscriber in self.__subscribers :
subscriber.notify()
pass
def add_subscriber(self,subscriber):
self.__subscribers.append(subscriber)
def remove_subscriber(self,subscriber):
self.__subscribers.remove(subscriber)
|
[
"mutlu.erdem@soft-nec.com"
] |
mutlu.erdem@soft-nec.com
|
1bacbdd7d2adb957a389d64b3941a31252aa6e64
|
609582ee37a01ac6a67fb9c957825dcd3c9a5b3a
|
/LeetCode_Linked_List/160_Intersection_Of_Two_Linked_List.py
|
b95d8c954f372d3807f1ca3cb6bbbed0548eadf4
|
[] |
no_license
|
captainjack331089/captainjack33.LeetCode
|
a9ad7b3591675c76814eda22e683745068e0abed
|
4c03f28371e003e8e6a7c30b7b0c46beb5e2a8e7
|
refs/heads/master
| 2022-03-07T19:53:40.454945
| 2019-11-06T19:32:00
| 2019-11-06T19:32:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,345
|
py
|
"""
160. Intersection of Two Linked Lists
Category: Linked List
Difficulty: Easy
"""
"""
Write a program to find the node at which the intersection of two singly linked lists begins.
For example, the following two linked lists:
begin to intersect at node c1.
Example 1:
Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA = 2, skipB = 3
Output: Reference of the node with value = 8
Input Explanation: The intersected node's value is 8 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [4,1,8,4,5]. From the head of B, it reads as [5,0,1,8,4,5]. There are 2 nodes before the intersected node in A; There are 3 nodes before the intersected node in B.
Example 2:
Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1
Output: Reference of the node with value = 2
Input Explanation: The intersected node's value is 2 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [0,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes before the intersected node in A; There are 1 node before the intersected node in B.
Example 3:
Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
Output: null
Input Explanation: From the head of A, it reads as [2,6,4]. From the head of B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.
Explanation: The two lists do not intersect, so return null.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNone(self, headA, headB):
p1 = headA
p2 = headB
while p1 != p2:
if not p1:
p1 = headB
else:
p1 = p1.next
if not p2:
p2 = headA
else:
p2 = p2.next
return p2
|
[
"qfhjack@gmail.com"
] |
qfhjack@gmail.com
|
6d960235947bbf4e658d18e273fb3658fd207da8
|
91b3f9f1803161c22ff5bed3e5604a07d67728ac
|
/patterns/factory/overlay_factory.py
|
802822ffdc740832cd8fbf414f2218ceb02f190f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
AndreTeixeira1998/TekkenBot
|
dab01fd022f91787f709241a17a903291e7089bd
|
015c601afbea5d75a46b3385f1d322b2655249b0
|
refs/heads/master
| 2023-07-17T16:52:14.182255
| 2021-04-22T17:29:55
| 2021-04-22T17:29:55
| 273,787,610
| 0
| 0
|
MIT
| 2020-06-20T21:34:27
| 2020-06-20T21:34:26
| null |
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2019, Alchemy Meister
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
"""
import inspect
from gui.my_tkinter.overlay import Overlay
from .factory import Factory
class OverlayFactory(Factory):
def __init__(self):
super().__init__()
self.__register_subclasses(Overlay)
def __register_subclasses(self, parent_class):
for cls in parent_class.__subclasses__():
if inspect.isabstract(cls):
self.__register_subclasses(cls)
else:
self.register_class(cls.CLASS_ID, cls)
|
[
"jesusesma@gmail.com"
] |
jesusesma@gmail.com
|
c48a6539ae876c3189fcf79c05265e1fdc2a596b
|
4a399d20f9934c4984bab229a015be69e9189067
|
/devel/lib/python2.7/dist-packages/roboy_communication_control/msg/_DebugNotification.py
|
39749bcc8f1c881c53ae2f12b81695e3eb409819
|
[
"BSD-3-Clause"
] |
permissive
|
Roboy/myoarm_small_FPGA
|
09af14c7d82c9e8fc923842ae5aad1be6344bf27
|
f2f11bee50078d8a03f352e3b3ef9f3d9244d87a
|
refs/heads/master
| 2021-01-21T03:21:49.777564
| 2017-08-30T22:11:44
| 2017-08-30T22:11:44
| 101,892,113
| 0
| 0
| null | 2017-08-30T14:49:18
| 2017-08-30T14:33:46
| null |
UTF-8
|
Python
| false
| false
| 7,015
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from roboy_communication_control/DebugNotification.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class DebugNotification(genpy.Message):
_md5sum = "e83a19f2165c907848c09efd00ad9d5e"
_type = "roboy_communication_control/DebugNotification"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32 code
string object
string msg
string extra
int32 validityDuration"""
__slots__ = ['code','object','msg','extra','validityDuration']
_slot_types = ['int32','string','string','string','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
code,object,msg,extra,validityDuration
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(DebugNotification, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.code is None:
self.code = 0
if self.object is None:
self.object = ''
if self.msg is None:
self.msg = ''
if self.extra is None:
self.extra = ''
if self.validityDuration is None:
self.validityDuration = 0
else:
self.code = 0
self.object = ''
self.msg = ''
self.extra = ''
self.validityDuration = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_i().pack(self.code))
_x = self.object
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.msg
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.extra
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_i().pack(self.validityDuration))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(self.code,) = _get_struct_i().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.object = str[start:end].decode('utf-8')
else:
self.object = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.msg = str[start:end].decode('utf-8')
else:
self.msg = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.extra = str[start:end].decode('utf-8')
else:
self.extra = str[start:end]
start = end
end += 4
(self.validityDuration,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_i().pack(self.code))
_x = self.object
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.msg
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.extra
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_i().pack(self.validityDuration))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(self.code,) = _get_struct_i().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.object = str[start:end].decode('utf-8')
else:
self.object = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.msg = str[start:end].decode('utf-8')
else:
self.msg = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.extra = str[start:end].decode('utf-8')
else:
self.extra = str[start:end]
start = end
end += 4
(self.validityDuration,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_i = None
def _get_struct_i():
global _struct_i
if _struct_i is None:
_struct_i = struct.Struct("<i")
return _struct_i
|
[
"simon.trendel@tum.de"
] |
simon.trendel@tum.de
|
674d799ef87465a5e5b80fdd21d63878fb2e1361
|
e7b956cd98f3400249cd5097029f0a1a9e8ba645
|
/app/relations/many_to_many/migrations/0002_auto_20180205_0701.py
|
727d216af345dda00738984b376c0fcafe2b46fb
|
[] |
no_license
|
standbyme227/fc-django-document
|
8ffc4430099fbee037f1336e319e40292bcf7af4
|
8f01c108f773f3f7edc49e1f6527ed3789754ba9
|
refs/heads/master
| 2021-05-04T16:08:11.133487
| 2018-02-22T03:05:44
| 2018-02-22T03:05:44
| 120,244,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,435
|
py
|
# Generated by Django 2.0.2 on 2018-02-05 07:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('many_to_many', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Postlike',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='many_to_many.Post')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='postlike',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='many_to_many.User'),
),
]
|
[
"standbyme227@gmail.com"
] |
standbyme227@gmail.com
|
c0816befac5b3984dad7c534e48520cc62c3eb87
|
88c1fa6dd5b51a93c4345951c41c4f56a82ba5a3
|
/LiveProject-Python/AppBuilder9000/ZPYLP0612/GreatestComedies/models.py
|
8eda5a405781578635b00d1f099ff55c4023617a
|
[] |
no_license
|
Sean-Beyer/PythonDjango-LiveProject
|
83335c4d5e22d00c34dac1c71c39f770ad896c4e
|
986b567fad49368c52182eb5196534ff8a8ebcfc
|
refs/heads/master
| 2022-12-13T22:43:21.820355
| 2020-09-01T00:34:18
| 2020-09-01T00:34:18
| 291,854,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
from django.db import models
# Created models
class Comedies(models.Model):
comedy = models.CharField(max_length=100, blank=True)
actor = models.CharField(max_length=100, blank=True)
director = models.CharField(max_length=100, blank=True)
year = models.CharField(max_length=100, blank=True)
imdb_rating = models.CharField(max_length=100, null=True)
rating = models.DecimalField(max_digits=2, decimal_places=1, null=True)
review = models.CharField(max_length=1000, blank=True, null=True)
Comedy= models.Manager() # object manager for Movie Database
def __str__(self):
return self.comedy
|
[
"61070387+Thalious@users.noreply.github.com"
] |
61070387+Thalious@users.noreply.github.com
|
ddfeb229c2eb58e3c70f6c7666511fd98cae0dd1
|
0ced37fd5631850c43319b43aa2ac48a105eeb08
|
/package/json_scraper.py
|
3c3aabec5aae14fa57f94f0663d4e51004aa6958
|
[] |
no_license
|
chomman/earthquake-finder
|
c0d9b0bd5104b10b0bd7beec5d11f58c0d22e69c
|
d7f4de33843e7ceed5c2113cdefbb908f11338a2
|
refs/heads/master
| 2020-12-28T20:41:48.404930
| 2015-03-31T15:14:24
| 2015-03-31T15:14:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
#!/usr/bin/python
## @json_scraper.py
# This file makes a request to an external webpage, and returns the json
# response content.
import requests
## scrape: scrape the content of provided url.
def scrape(url):
# request (get) given url, store json response content
r = requests.get(url)
data = r.json()
# return content
return data
|
[
"jeff1evesque@yahoo.com"
] |
jeff1evesque@yahoo.com
|
df4a7dfe2740775f79df27bea8aabba64637d720
|
6b97da799cb9b72d711a5e1d6321e4e11f3cbe51
|
/bin/iptest3
|
8f85d0e0d8a8e3a6e242b360c552cbf740df3398
|
[] |
no_license
|
dx-entity/env_parabola
|
3531120d213ade533052161ec70f3a511f2fc90a
|
f830d5f05a578b1ed2b16f6898fb226e27de6b52
|
refs/heads/master
| 2021-01-09T20:22:51.509076
| 2016-07-22T06:55:49
| 2016-07-22T06:55:49
| 63,930,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
#!/root/python_pro/env_parabola/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from IPython.testing.iptestcontroller import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"dengxin@iie.ac.cn"
] |
dengxin@iie.ac.cn
|
|
a92d9bc93ed8d9c1567fb32f622b50c221616b5e
|
b5937928a48340569f673e237e42f32ab62cfd15
|
/src/pathCrossing/path.py
|
19ce0bdef5e67249b47937b919aa1930eff3039d
|
[
"CC0-1.0"
] |
permissive
|
rajitbanerjee/leetcode
|
79731de57ab4b0edd765b3cbb4aac459973fb22d
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
refs/heads/master
| 2021-06-13T11:19:03.905797
| 2021-06-02T14:40:08
| 2021-06-02T14:40:08
| 191,103,205
| 2
| 1
| null | 2020-02-23T23:41:45
| 2019-06-10T05:34:46
|
Java
|
UTF-8
|
Python
| false
| false
| 575
|
py
|
class Solution:
def isPathCrossing(self, path: str) -> bool:
x, y = 0, 0
visited = {(x, y)}
for p in path:
if p == 'N':
y += 1
elif p == 'S':
y -= 1
elif p == 'E':
x += 1
else:
x -= 1
if (x, y) in visited:
return True
else:
visited.add((x, y))
return False
if __name__ == '__main__':
path = input("Input: ")
print(f"Output: {Solution().isPathCrossing(path)}")
|
[
"rajit.banerjee@ucdconnect.ie"
] |
rajit.banerjee@ucdconnect.ie
|
8399f0725684d5f05d0c7cdd73ca17a6c14c7062
|
403217dc6e0ea465b90d26faaa630dc30b04b396
|
/tests/test_transformers.py
|
47c941b50561247fa4c2c912717b5c08700f0256
|
[
"Python-2.0",
"Apache-2.0"
] |
permissive
|
fuzeman/QueryCondenser
|
f5708fe855c449e195d20d7db9ca5e7b0b657541
|
624d8db0077e540b4214eb44bb1def4bd659c50a
|
refs/heads/master
| 2016-09-11T03:35:02.571079
| 2013-11-25T04:04:47
| 2013-11-25T04:04:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,422
|
py
|
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from unittest import TestCase
from logr import Logr
from qcond import MergeTransformer, SliceTransformer
from qcond.helpers import itemsMatch
from qcond.transformers.base import Transformer
from qcond.transformers.merge import DNode, print_tree
class TestTransformer(TestCase):
def test_run(self):
transformer = Transformer()
self.assertRaises(NotImplementedError, transformer.run, [])
class TestMergeTransformer(TestCase):
def setUp(self):
Logr.configure(logging.DEBUG)
self.merge = MergeTransformer()
def test_apartment_23(self):
self.assertSequenceEqual(self.merge.run([
"Don't Trust the B---- in Apartment 23",
"Apartment 23",
"Apt 23",
"Don't Trust the B in Apt 23",
"Don't Trust the B- in Apt 23",
"Don't Trust the Bitch in Apartment 23",
"Don't Trust the Bitch in Apt 23",
"Dont Trust the Bitch in Apartment 23"
]), [
'dont trust the',
'dont trust the apartment 23',
'dont trust the apt 23',
'apt 23',
'apartment 23'
])
def test_legend_of_korra(self):
self.assertSequenceEqual(self.merge.run([
"The Legend of Korra",
"The Last Airbender The Legend of Korra",
"Avatar: The Legend of Korra",
"Legend of Korra",
"La Leggenda Di Korra"
]), [
'the',
'the korra',
'avatar the legend of korra',
'la leggenda di korra',
'legend of korra'
])
def test_merge_is_order_independent(self):
root_one = [
self._create_chain(['avatar', 'the', 'legend', 'of', 'korra']),
self._create_chain(['la', 'leggenda', 'di', 'korra']),
self._create_chain(['the', 'last', 'airbender', 'the', 'legend', 'of', 'korra'])
]
self._create_chain(['legend', 'of', 'korra'], root_one[-1])
root_one.append(self._create_chain(['legend', 'of', 'korra']))
result_one = self.merge.merge(root_one)
Logr.debug("-----------------------------------------------------------------")
root_two = [
self._create_chain(['the', 'legend', 'of', 'korra']),
]
self._create_chain(['last', 'airbender', 'the', 'legend', 'of', 'korra'], root_two[-1])
root_two += [
self._create_chain(['legend', 'of', 'korra']),
self._create_chain(['la', 'leggenda', 'di', 'korra']),
self._create_chain(['avatar', 'the', 'legend', 'of', 'korra'])
]
result_two = self.merge.merge(root_two)
Logr.debug("=================================================================")
assert itemsMatch(
self._get_chain_values(result_one),
self._get_chain_values(result_two)
)
def test_merge(self):
pass
def _get_chain_values(self, node_or_nodes):
if type(node_or_nodes) is list:
results = []
for node in node_or_nodes:
results += self._get_chain_values(node)
return results
node = node_or_nodes
if node.right:
return self._get_chain_values(node.right)
score, value, original_value = node.full_value()
return [value]
def _create_chain(self, words, root=None):
if not root:
root = DNode(words[0], None)
words = words[1:]
last_node = root
while len(words):
word = words.pop(0)
node = DNode(word, last_node)
last_node.right.append(node)
last_node = node
return root
class TestSliceTransformer(TestCase):
def setUp(self):
self.slice = SliceTransformer()
def test_apartment_23(self):
self.assertSequenceEqual(self.slice.run([
"Don't Trust the B---- in Apartment 23",
"Apartment 23",
"Apt 23",
"Don't Trust the B in Apt 23",
"Don't Trust the B- in Apt 23",
"Don't Trust the Bitch in Apartment 23",
"Don't Trust the Bitch in Apt 23",
"Dont Trust the Bitch in Apartment 23"
]), [
"Don't Trust the B in Apt 23",
'Dont Trust the Bitch in Apartment 23',
'Apartment 23',
'Apt 23'
])
def test_legend_of_korra(self):
self.assertSequenceEqual(self.slice.run([
"The Legend of Korra",
"The Last Airbender The Legend of Korra",
"Avatar: The Legend of Korra",
"Legend of Korra",
"La Leggenda Di Korra"
]), [
'Legend of Korra',
'La Leggenda Di Korra'
])
|
[
"gardiner91@gmail.com"
] |
gardiner91@gmail.com
|
1755ace993f4ea02065efd561ec2b726b5d17337
|
838302a39e25067fa7152c1a21574d80dbc25e94
|
/routes/urls.py
|
482446f2644cfefb444e61bbee0deb991c87a2b7
|
[] |
no_license
|
Vadym-Hub/route_search
|
53f46b39f588bb9ee53f1f70d09f045f1d466492
|
b1c0b5ac754e5e3601ab6815649eda4f50e9ae32
|
refs/heads/master
| 2021-09-28T01:40:57.271666
| 2020-07-12T23:03:27
| 2020-07-12T23:03:27
| 250,011,206
| 0
| 0
| null | 2021-09-22T18:57:23
| 2020-03-25T15:07:23
|
Python
|
UTF-8
|
Python
| false
| false
| 525
|
py
|
from django.urls import path
from .views import HomeView, RouteDetailView, RouteListView, add_route, find_routes, RouteDeleteView
apps_name = 'routes'
urlpatterns = [
path('find/', find_routes, name='find_routes'),
path('add_route/', add_route, name='add_route'),
path('list/', RouteListView.as_view(), name='list'),
path('detail/<int:pk>/', RouteDetailView.as_view(), name='detail'),
path('delete/<int:pk>/', RouteDeleteView.as_view(), name='delete'),
path('', HomeView.as_view(), name='home'),
]
|
[
"webratel@gmail.com"
] |
webratel@gmail.com
|
a72e20d9939dd2d43d0f6b798a108c4a1ceb872e
|
e99dfc900052272f89d55f2fd284389de2cf6a73
|
/apostello/forms.py
|
e069a875e9ac32cb666be19e6c15e47374ee20b0
|
[
"MIT"
] |
permissive
|
armenzg/apostello
|
a3e6ca3d34917608af79fbab4134ee4de1f5e8ee
|
1827547b5a8cf94bf1708bb4029c0b0e834416a9
|
refs/heads/master
| 2021-01-18T18:16:02.364837
| 2017-03-22T20:34:21
| 2017-03-22T20:34:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,984
|
py
|
from django import forms
from django.forms import ModelMultipleChoiceField
from apostello.models import Keyword, Recipient, RecipientGroup, UserProfile
from apostello.validators import gsm_validator, less_than_sms_char_limit
class SendAdhocRecipientsForm(forms.Form):
"""Send an sms to ad-hoc groups."""
content = forms.CharField(
validators=[gsm_validator, less_than_sms_char_limit],
required=True,
min_length=1,
)
recipients = forms.ModelMultipleChoiceField(
queryset=Recipient.objects.filter(is_archived=False),
required=True,
help_text='',
widget=forms.SelectMultiple(
attrs={
"class": "ui compact search dropdown",
"multiple": "",
}
),
)
scheduled_time = forms.DateTimeField(
required=False,
help_text='Leave this blank to send your message immediately, '
'otherwise select a date and time to schedule your message',
widget=forms.TextInput(
attrs={
'data-field': 'datetime',
'readonly': True,
},
),
)
def clean(self):
"""Override clean method to check SMS cost limit."""
cleaned_data = super(SendAdhocRecipientsForm, self).clean()
if 'recipients' in cleaned_data and 'content' in cleaned_data:
# if we have no recipients, we don't need to check cost limit
Recipient.check_user_cost_limit(
cleaned_data['recipients'],
self.user.profile.message_cost_limit, cleaned_data['content']
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(SendAdhocRecipientsForm, self).__init__(*args, **kwargs)
class SendRecipientGroupForm(forms.Form):
"""Send an sms to pre-defined group."""
content = forms.CharField(
validators=[gsm_validator, less_than_sms_char_limit],
required=True,
min_length=1,
)
recipient_group = forms.ModelChoiceField(
queryset=RecipientGroup.objects.filter(is_archived=False),
required=True,
empty_label='Choose a group...',
widget=forms.Select(
attrs={
"class": "ui fluid dropdown",
"id": "id_recipient_group",
}
),
)
scheduled_time = forms.DateTimeField(
required=False,
help_text='Leave this blank to send your message immediately, '
'otherwise select a date and time to schedule your message',
widget=forms.TextInput(
attrs={
'data-field': 'datetime',
'readonly': True,
},
),
)
def clean(self):
"""Override clean method to check SMS cost limit."""
cleaned_data = super(SendRecipientGroupForm, self).clean()
if 'recipient_group' in cleaned_data and 'content' in cleaned_data:
# if we have no recipient group, we don't need to check cost limit
cleaned_data['recipient_group'].check_user_cost_limit(
self.user.profile.message_cost_limit, cleaned_data['content']
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(SendRecipientGroupForm, self).__init__(*args, **kwargs)
class ManageRecipientGroupForm(forms.ModelForm):
"""
Manage RecipientGroup updates and creation.
__init__ and save are overridden to pull in group members.
"""
class Meta:
model = RecipientGroup
exclude = ['is_archived']
class RecipientForm(forms.ModelForm):
"""Handle Recipients."""
class Meta:
model = Recipient
exclude = ['is_archived', 'is_blocking']
widgets = {
'number': forms.TextInput(attrs={'placeholder': '+447259006790'}),
'groups': forms.SelectMultiple(
attrs={
"class": "ui fluid search dropdown",
"multiple": "",
"id": "groups_dropdown",
}
),
}
class UserChoiceField(ModelMultipleChoiceField):
"""Display emails and user names when selecting users."""
def label_from_instance(self, obj):
"""Display the user's label."""
return '{0} ({1})'.format(obj.email, obj.username)
class KeywordForm(forms.ModelForm):
"""Handle Keywords."""
class Meta:
model = Keyword
exclude = ['is_archived', 'last_email_sent_time']
field_classes = {
'subscribed_to_digest': UserChoiceField,
'owners': UserChoiceField,
}
widgets = {
'keyword':
forms.TextInput(attrs={'placeholder': '(No spaces allowed)'}),
'description': forms.TextInput(
attrs={
'placeholder':
'Please provide a description of your keyword.'
}
),
'custom_response': forms.TextInput(
attrs={
'placeholder':
'eg: Thanks %name%, you have sucessfully signed up.'
}
),
'activate_time': forms.TextInput(
attrs={
'data-field': 'datetime',
'readonly': True,
},
),
'deactivate_time': forms.TextInput(
attrs={
'data-field': 'datetime',
'readonly': True,
},
),
'owners': forms.SelectMultiple(
attrs={
"class": "ui fluid search dropdown",
"multiple": "",
"id": "owners_dropdown",
}
),
'linked_groups': forms.SelectMultiple(
attrs={
"class": "ui fluid search dropdown",
"multiple": "",
"id": "linked_group_dropdown",
}
),
'subscribed_to_digest': forms.SelectMultiple(
attrs={
"class": "ui fluid search dropdown",
"multiple": "",
"id": "digest_dropdown",
}
),
}
class CsvImport(forms.Form):
"""Handle CSV imports."""
csv_data = forms.CharField(
help_text='John, Calvin, +447095237960', widget=forms.Textarea
)
class UserProfileForm(forms.ModelForm):
"""Handle User Permission Updates"""
class Meta:
model = UserProfile
exclude = ['user', ]
class GroupAllCreateForm(forms.Form):
"""Form used to create groups with all recipients.
Should only be used to create, not edit groups.
"""
group_name = forms.CharField(
help_text='Name of group.\n'
'If this group already exists it will be overwritten.',
max_length=150,
)
|
[
"montgomery.dean97@gmail.com"
] |
montgomery.dean97@gmail.com
|
93fa705b2aa486c2ea927afb7382f4d04a4ab1b2
|
4569d707a4942d3451f3bbcfebaa8011cc5a128d
|
/masterticketsplugin/branches/0.10/setup.py
|
959b0c3c08eb24427c6df3a00be9187b87778476
|
[] |
no_license
|
woochica/trachacks
|
28749b924c897747faa411876a3739edaed4cff4
|
4fcd4aeba81d734654f5d9ec524218b91d54a0e1
|
refs/heads/master
| 2021-05-30T02:27:50.209657
| 2013-05-24T17:31:23
| 2013-05-24T17:31:23
| 13,418,837
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from setuptools import setup
setup(
name = 'TracMasterTickets',
version = '1.0',
packages = ['mastertickets'],
package_data = { 'mastertickets': ['htdocs/*.js', 'htdocs/*.css' ] },
author = "Noah Kantrowitz",
author_email = "noah@coderanger.net",
description = "Provides support for ticket dependencies and master tickets.",
license = "BSD",
keywords = "trac plugin ticket dependencies master",
url = "http://trac-hacks.org/wiki/MasterTicketsPlugin",
classifiers = [
'Framework :: Trac',
],
install_requires = ['TracWebAdmin'],
entry_points = {
'trac.plugins': [
'mastertickets.web_ui = mastertickets.web_ui',
]
}
)
|
[
"coderanger@7322e99d-02ea-0310-aa39-e9a107903beb"
] |
coderanger@7322e99d-02ea-0310-aa39-e9a107903beb
|
cb90179a0f2c0c6d6d9ecd0add119d15ce349b91
|
cdaeb2c9bbb949b817f9139db2d18120c70f1694
|
/setup.py
|
3464bc1d5f33496f16de775ce95e205c38b6b79e
|
[
"Apache-2.0"
] |
permissive
|
sreekanthpulagam/rakam-python-client
|
665c984ac7a29b57ead6feaeb99a69ba345220e6
|
8bd843208b03726d6ce89ee343b48b889b576e0e
|
refs/heads/master
| 2021-01-24T15:42:36.374366
| 2016-07-19T21:49:26
| 2016-07-19T21:49:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,537
|
py
|
# coding: utf-8
"""
Rakam API Documentation
An analytics platform API that lets you create your own analytics services.
OpenAPI spec version: 0.5
Contact: contact@rakam.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from setuptools import setup, find_packages
NAME = "rakam_client"
VERSION = "0.5"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Rakam API Documentation",
author_email="contact@rakam.io",
url="",
keywords=["Swagger", "Rakam API Documentation"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
An analytics platform API that lets you create your own analytics services.
"""
)
|
[
"emrekabakci@gmail.com"
] |
emrekabakci@gmail.com
|
cb86190241829fe4dbed3dcca133c4bba33f705d
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2021_12_01_preview/operations/_tenant_level_access_review_instance_contacted_reviewers_operations.py
|
48d84587e79ccee9f1e63b27a608b56c06fc182f
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559
| 2022-10-18T06:05:46
| 2022-10-18T06:05:46
| 182,325,031
| 0
| 0
|
MIT
| 2019-07-25T22:28:52
| 2019-04-19T20:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 7,033
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(schedule_definition_id: str, id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}/contactedReviewers",
) # pylint: disable=line-too-long
path_format_arguments = {
"scheduleDefinitionId": _SERIALIZER.url("schedule_definition_id", schedule_definition_id, "str"),
"id": _SERIALIZER.url("id", id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class TenantLevelAccessReviewInstanceContactedReviewersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.authorization.v2021_12_01_preview.AuthorizationManagementClient`'s
:attr:`tenant_level_access_review_instance_contacted_reviewers` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, schedule_definition_id: str, id: str, **kwargs: Any
) -> Iterable["_models.AccessReviewContactedReviewer"]:
"""Get access review instance contacted reviewers.
:param schedule_definition_id: The id of the access review schedule definition. Required.
:type schedule_definition_id: str
:param id: The id of the access review instance. Required.
:type id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AccessReviewContactedReviewer or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewContactedReviewer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.AccessReviewContactedReviewerListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
schedule_definition_id=schedule_definition_id,
id=id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AccessReviewContactedReviewerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}/contactedReviewers"} # type: ignore
|
[
"noreply@github.com"
] |
test-repo-billy.noreply@github.com
|
24983dba27a4c3513d731d7b06bc5dccdeee9d43
|
7dba60ae27ff247705607839348f017b85f5da16
|
/nyumbax/migrations/0002_auto_20210411_0803.py
|
2ec99f45ae6bd3b4e3db2f3f8d33a24f6ac451aa
|
[
"MIT"
] |
permissive
|
BwanaQ/nyumba-kumi
|
7edccb6745ede6d9f6faf5bd8c0dcf6e24726991
|
c264b0941c77a4d7175a2dc5380723bea1acf380
|
refs/heads/master
| 2023-04-05T09:32:34.867456
| 2021-04-13T15:54:16
| 2021-04-13T15:54:16
| 356,136,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
# Generated by Django 3.2 on 2021-04-11 08:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('nyumbax', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Essential',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('officer', models.CharField(max_length=100)),
('phone', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
],
),
migrations.RemoveField(
model_name='hood',
name='admin',
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('body', models.TextField()),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Business',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"thunjawax@gmail.com"
] |
thunjawax@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.