| blob_id (string, 40–40) | directory_id (string, 40–40) | path (string, 3–288) | content_id (string, 40–40) | detected_licenses (list, 0–112) | license_type (2 classes) | repo_name (string, 5–115) | snapshot_id (string, 40–40) | revision_id (string, 40–40) | branch_name (684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k–681M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable) | gha_language (147 classes) | src_encoding (25 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128–12.7k) | extension (142 classes) | content (string, 128–8.19k) | authors (list, 1–1) | author_id (string, 1–132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
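A minimal sketch of reading one record of a dataset with this schema through the Hugging Face `datasets` library (the dataset path "org/dataset-name" is a placeholder, not the real one):

from datasets import load_dataset

# Stream a single row and inspect a few of the columns listed above.
ds = load_dataset("org/dataset-name", split="train", streaming=True)
row = next(iter(ds))
print(row["repo_name"], row["path"], row["license_type"])
print(row["content"][:200])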
96b61046248a3e72e93e236aec968705b8d8a09c
|
cc78de009a8e7805f9f6a852774e0384b11bfdcb
|
/testcase/common/basePage/basePage.py
|
e16a2bdc7ec581f43c1f2ee9d521c66c74831b5f
|
[] |
no_license
|
williamzxl/app_test_many_devices
|
c1806e54c17a84f18a04c3808604633c2deba052
|
dd5434018fadd11d5462903cafaafbb5b387c24a
|
refs/heads/master
| 2020-03-29T21:20:51.239295
| 2019-03-05T03:13:56
| 2019-03-05T03:13:56
| 150,361,766
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,861
|
py
|
from testcase.common.basePage.web_view import WebView
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from utils.log import logger
class BasePage(WebView):
def __init__(self, page=None, browser_type=None):
if page:
self.driver = page.driver
else:
super(BasePage, self).__init__(browser_type=browser_type)
def get_driver(self):
return self.driver
def open(self, appium_url, desired_caps):
try:
logger.info("Open appium_url: {}".format(appium_url))
logger.info("desired_caps:{}".format(desired_caps))
self.get(appium_url, desired_caps)
        except Exception:
            logger.warning("Can't open appium url: {}".format(appium_url))
            raise ValueError("Connect appium failed!")
def find_element(self, *loc):
try:
WebDriverWait(self.driver, 30).until(EC.visibility_of_element_located(loc))
logger.info("Success return self.driver.find_element(*loc):{}".format(loc))
return self.driver.find_element(*loc)
        except TimeoutException:
            logger.error("In {} can't find {}".format(self, loc))
            return False
def find_elements(self, *loc):
try:
            WebDriverWait(self.driver, 30).until(EC.visibility_of_element_located(loc))
            logger.info("Success return self.driver.find_elements(*loc): {}".format(loc))
            return self.driver.find_elements(*loc)
        except TimeoutException:
            # print("In {} cant find {}".format(self, loc))
            logger.error("In {} can't find {}".format(self, loc))
            return False
# def script(self, src):
# self.driver.execute_script(src)
def sendKeys(self, loc, value, clear_first=True, click_first=True):
try:
# loc = getattr(self, "_{}".format(loc))
if click_first:
# self.find_element(*loc).click()
loc.click()
if clear_first:
# self.find_element(*loc).clear()
loc.clear()
# self.find_element(*loc).send_keys(value)
loc.send_keys(value)
except AttributeError:
logger.error("{} page cant find {} element".format(self, loc))
def get_url(self):
return self.driver.current_url
def getEleText(self,ele):
return ele.text
def getEleSize(self, ele):
return ele.size
def getEleLocation(self, ele):
return ele.location
    def is_selected(self, element):
        return element.is_selected()
    def is_enabled(self, element):
        return element.is_enabled()
    def is_displayed(self, element):
        return element.is_displayed()
def enter(self, element):
element.send_keys(Keys.RETURN)
def click(self, element):
element.click()
def submit(self):
pass
def getEleAttribute(self, element, attribute):
return element.get_attribute(attribute)
# def getAttribute(self, ele, name):
# return ele.get_attribute(name)
def getText(self, element):
try:
return element.text
        except AttributeError:
logger.error("No such element TEXT")
def getTitle(self):
return self.driver.title
def getCurrentUrl(self):
return self.driver.current_url
    def get_contexts(self):
        return self.driver.contexts
    def get_current_context(self):
        return self.driver.current_context
    def get_context(self):
        return self.driver.context
def page_source(self):
return self.driver.page_source
def page_source_test(self):
return self.driver.page_source
if __name__ == "__main__":
test = BasePage()
test.open()
|
[
"1053418947@qq.com"
] |
1053418947@qq.com
|
986c55e0d84ee0bd44afcf5e2e73e30436d3f834
|
a3746020cf091f433beb41bde1b62818b4de569b
|
/new_rule/ticket-rules/oracle/DML_SORT.py
|
3010c2c185dc831b9fbf2b84a1610544000dec4e
|
[] |
no_license
|
kk71/sqlaudit
|
59bab5765a67f56f1dd2f3103812051c5acbbc49
|
747aaa02573a9c2b46a9e14415d27c0ab8e6158c
|
refs/heads/master
| 2023-02-04T18:38:46.125746
| 2020-06-05T09:49:46
| 2020-06-05T09:49:46
| 323,559,338
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
import re
def code(rule, entries, **kwargs):
single_sql: dict = kwargs.get("single_sql")
sql_text: str = single_sql["sql_text_no_comment"]
dml_sort = re.compile("(\\s)?((update )|(delete )).*order by")
if dml_sort.search(sql_text):
return -rule.weight, []
return None, []
code_hole.append(code)
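# A quick standalone check of the pattern above (a sketch outside the sqlaudit
# framework, which is assumed to supply `rule`, `entries` and `code_hole`):
# the rule flags UPDATE/DELETE statements that contain an ORDER BY clause.
#
#   import re
#   dml_sort = re.compile("(\\s)?((update )|(delete )).*order by")
#   assert dml_sort.search("update t set a = 1 order by b")   # flagged
#   assert not dml_sort.search("select * from t order by b")  # not flagged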
|
[
"kai.fang@kirintech.cn"
] |
kai.fang@kirintech.cn
|
305b4d8a0ed416ed5c175894f2c49dbe30f16039
|
c570dcfc3ec166f73719a81b02262bf2885b458b
|
/setup.py
|
efa6fdfa8b383ac34d7981c70b9a5883ca632879
|
[] |
no_license
|
thatch45/archinator
|
9eb139a064b8dafe3a63ec9a27177a21b96357ef
|
2e4d9874b0cd9fee68f90ebe4bf2c20dfb1fb220
|
refs/heads/master
| 2020-06-01T03:52:43.793091
| 2013-06-24T05:24:04
| 2013-06-24T05:24:04
| 10,711,450
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='archinator',
version='0.5.0',
description='Virtual machine generator for ArchLinux',
author='Thomas S Hatch',
author_email='thatch45@gmail.com',
url='https://github.com/thatch45/archinator',
packages=[
'archinator',
'archinator.utils',
],
scripts=['scripts/archinator'],
)
|
[
"thatch45@gmail.com"
] |
thatch45@gmail.com
|
eaa85e885404219c0a831b463a0c456fffe1d7f0
|
4546398a18590e4e182629fb55d185547dd6df0a
|
/2023/problems/millifaersla/submissions/partially_accepted/strings_0.py
|
a370ff95a0790d6ce6228aa8fa0be95e244d3908
|
[] |
no_license
|
ForritunarkeppniFramhaldsskolanna/Keppnir
|
352341fa97c6349af65b513c03171f3e706f7db2
|
65c8eb5358d8a49f956edf76c2d47b9372accc3c
|
refs/heads/master
| 2023-04-28T15:33:36.396225
| 2023-04-23T15:00:15
| 2023-04-23T15:00:15
| 78,303,702
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
#!/usr/bin/python3
a = input()
b = input()
c = input()
if a < b and a < c:
print("Monnei")
elif b < a and b < c:
print("Fjee")
else:
print("Dolladollabilljoll")
|
[
"bjarki.agust@gmail.com"
] |
bjarki.agust@gmail.com
|
18939fdae293b7a96059b4ed05b61fab9a65a3e3
|
c4e97f2eb1081d8fad5e64872c3d6acf9a89d445
|
/Solutions/0135_candy.py
|
af76e71034174d61806ca337a98996a7aa0af28e
|
[] |
no_license
|
YoupengLi/leetcode-sorting
|
0efb3f4d7269c76a3ed11caa3ab48c8ab65fea25
|
3d9e0ad2f6ed92ec969556f75d97c51ea4854719
|
refs/heads/master
| 2020-05-18T23:28:51.363862
| 2019-09-12T00:42:14
| 2019-09-12T00:42:14
| 184,712,501
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,312
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/7/18 0018 09:54
# @Author : Youpeng Li
# @Site :
# @File : 0135_candy.py
# @Software: PyCharm
'''
135. Candy
There are N children standing in a line. Each child is assigned a rating value.
You are giving candies to these children subject to the following requirements:
Each child must have at least one candy.
Children with a higher rating get more candies than their neighbors.
What is the minimum candies you must give?
Example 1:
Input: [1,0,2]
Output: 5
Explanation: You can allocate to the first, second and third child with 2, 1, 2 candies respectively.
Example 2:
Input: [1,2,2]
Output: 4
Explanation: You can allocate to the first, second and third child with 1, 2, 1 candies respectively.
The third child gets 1 candy because it satisfies the above two conditions.
'''
class Solution:
def candy(self, ratings: 'List[int]') -> 'int':
if not ratings:
return 0
res = [1] * len(ratings)
lbase, rbase = 1, 1
        for i in range(1, len(ratings)):  # scan from left to right
lbase = lbase + 1 if ratings[i] > ratings[i - 1] else 1
res[i] = lbase
        for i in range(len(ratings) - 2, -1, -1):  # scan from right to left
rbase = rbase + 1 if ratings[i] > ratings[i + 1] else 1
res[i] = max(rbase, res[i])
return sum(res)
def candy_1(self, ratings: 'List[int]') -> 'int':
peak = down = up = 0
res = 1
for i in range(1, len(ratings)):
if ratings[i - 1] < ratings[i]:
up += 1
down = 0
peak = up
res += 1 + up
elif ratings[i - 1] == ratings[i]:
up = down = peak = 0
res += 1
else:
up = 0
down += 1
res += 1 + down + ((-1) if peak >= down else 0)
return res
if __name__ == "__main__":
a = Solution()
ratings = [1, 0, 2]
print(a.candy(ratings))
print(a.candy_1(ratings))
ratings = [1, 2, 2]
print(a.candy(ratings))
print(a.candy_1(ratings))
ratings = [1, 2, 3, 2, 1, 0]
print(a.candy(ratings))
print(a.candy_1(ratings))
|
[
"noreply@github.com"
] |
YoupengLi.noreply@github.com
|
fcfd0e41293518178f353971f8e706f2fb7b44c2
|
8bbeb7b5721a9dbf40caa47a96e6961ceabb0128
|
/python3/212.Word Search II(单词搜索 II).py
|
b33e11ee9eccfd5d5831b32835b4be8c3ca57304
|
[
"MIT"
] |
permissive
|
lishulongVI/leetcode
|
bb5b75642f69dfaec0c2ee3e06369c715125b1ba
|
6731e128be0fd3c0bdfe885c1a409ac54b929597
|
refs/heads/master
| 2020-03-23T22:17:40.335970
| 2018-07-23T14:46:06
| 2018-07-23T14:46:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,685
|
py
|
"""
<p>Given a 2D board and a list of words from the dictionary, find all words in the board.</p>
<p>Each word must be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once in a word.</p>
<p><strong>Example:</strong></p>
<pre>
<strong>Input:</strong>
<b>words</b> = <code>["oath","pea","eat","rain"]</code> and <b>board </b>=
[
['<span style="color:#d70">o</span>','<span style="color:#d70">a</span>','a','n'],
['e','<span style="color:#d30">t</span>','<span style="color:#d00">a</span>','<span style="color:#d00">e</span>'],
['i','<span style="color:#d70">h</span>','k','r'],
['i','f','l','v']
]
<strong>Output: </strong><code>["eat","oath"]</code>
</pre>
<p><b>Note:</b><br />
You may assume that all inputs are consist of lowercase letters <code>a-z</code>.</p><p>给定一个二维网格 <strong>board </strong>和一个字典中的单词列表 <strong>words</strong>,找出所有同时在二维网格和字典中出现的单词。</p>
<p>单词必须按照字母顺序,通过相邻的单元格内的字母构成,其中“相邻”单元格是那些水平相邻或垂直相邻的单元格。同一个单元格内的字母在一个单词中不允许被重复使用。</p>
<p><strong>示例:</strong></p>
<pre><strong>输入:</strong>
<strong>words</strong> = <code>["oath","pea","eat","rain"]</code> and <strong>board </strong>=
[
['<strong>o</strong>','<strong>a</strong>','a','n'],
['e','<strong>t</strong>','<strong>a</strong>','<strong>e</strong>'],
['i','<strong>h</strong>','k','r'],
['i','f','l','v']
]
<strong>输出: </strong><code>["eat","oath"]</code></pre>
<p><strong>说明:</strong><br>
你可以假设所有输入都由小写字母 <code>a-z</code> 组成。</p>
<p><strong>提示:</strong></p>
<ul>
<li>你需要优化回溯算法以通过更大数据量的测试。你能否早点停止回溯?</li>
<li>如果当前单词不存在于所有单词的前缀中,则可以立即停止回溯。什么样的数据结构可以有效地执行这样的操作?散列表是否可行?为什么? 前缀树如何?如果你想学习如何实现一个基本的前缀树,请先查看这个问题: <a href="/problems/implement-trie-prefix-tree/description/">实现Trie(前缀树)</a>。</li>
</ul>
<p>给定一个二维网格 <strong>board </strong>和一个字典中的单词列表 <strong>words</strong>,找出所有同时在二维网格和字典中出现的单词。</p>
<p>单词必须按照字母顺序,通过相邻的单元格内的字母构成,其中“相邻”单元格是那些水平相邻或垂直相邻的单元格。同一个单元格内的字母在一个单词中不允许被重复使用。</p>
<p><strong>示例:</strong></p>
<pre><strong>输入:</strong>
<strong>words</strong> = <code>["oath","pea","eat","rain"]</code> and <strong>board </strong>=
[
['<strong>o</strong>','<strong>a</strong>','a','n'],
['e','<strong>t</strong>','<strong>a</strong>','<strong>e</strong>'],
['i','<strong>h</strong>','k','r'],
['i','f','l','v']
]
<strong>输出: </strong><code>["eat","oath"]</code></pre>
<p><strong>说明:</strong><br>
你可以假设所有输入都由小写字母 <code>a-z</code> 组成。</p>
<p><strong>提示:</strong></p>
<ul>
<li>你需要优化回溯算法以通过更大数据量的测试。你能否早点停止回溯?</li>
<li>如果当前单词不存在于所有单词的前缀中,则可以立即停止回溯。什么样的数据结构可以有效地执行这样的操作?散列表是否可行?为什么? 前缀树如何?如果你想学习如何实现一个基本的前缀树,请先查看这个问题: <a href="/problems/implement-trie-prefix-tree/description/">实现Trie(前缀树)</a>。</li>
</ul>
"""
class Solution:
def findWords(self, board, words):
"""
:type board: List[List[str]]
:type words: List[str]
:rtype: List[str]
"""
|
[
"lishulong@wecash.net"
] |
lishulong@wecash.net
|
a1c525970930a33a6c0f1bf1920e3c29220e62b8
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/M/mgrollins/eu_location_scraper.py
|
f3f5904113f77d083f6938f71bc20e35e5486059
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
import scraperwiki
import lxml.html
from lxml import etree
html = scraperwiki.scrape("http://www.edd.ca.gov/jobs_and_training/Experience_Unlimited_Local_Information.htm")
root = lxml.html.fromstring(html)
locations = []
#for lel in root.cssselect("div.main_content"):
print "in lel loop"
for el in root.cssselect("div.content_left_column h2"):
if el.text_content() != "More Information":
locations.append(el.text_content())
print "in el loop"
# for lel in el.cssselect("*"):
# print lel.text_content()
# break
# place holder
for loc in locations:
print loc +",",
# scraperwiki.sqlite.save(unique_keys = ['locations'], data = locations)
# print lxml.html.tostring(el)
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
566aba3d4ea799a084e1bf8b391feb92af2aee30
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_write_f/security-group-egres_revoke.py
|
36344b5fae7779de59a4902f9442cdb2b9e42c4b
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
authorize-security-group-egress : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/authorize-security-group-egress.html
"""
write_parameter("ec2", "revoke-security-group-egress")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
29e1406fefde1df7fe0c61d695ea159a957875f9
|
81579ecd0678d652bbb57ff97529631fcfb74b12
|
/corehq/ex-submodules/dimagi/utils/tests/test_rate_limit.py
|
184a6cec42531b4957a7d54581d3e65eb21f137c
|
[
"BSD-3-Clause"
] |
permissive
|
dungeonmaster51/commcare-hq
|
64fece73671b03c1bca48cb9d1a58764d92796ea
|
1c70ce416564efa496fb4ef6e9130c188aea0f40
|
refs/heads/master
| 2022-12-03T21:50:26.035495
| 2020-08-11T07:34:59
| 2020-08-11T07:34:59
| 279,546,551
| 1
| 0
|
BSD-3-Clause
| 2020-07-31T06:13:03
| 2020-07-14T09:51:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
from dimagi.utils.rate_limit import rate_limit, DomainRateLimiter
from django.test import SimpleTestCase
# import the datetime module and not datetime.datetime:
# "datetime" has to be the datetime module since the tests/__init__.py file
# just imports * from all test files and the json_format_datetime doctest
# expects datetime to be the datetime module
import datetime
class RateLimitTestCase(SimpleTestCase):
def test_rate_limit(self):
start = datetime.datetime.utcnow()
rate_limit_count = 0
iteration_count = 0
while (datetime.datetime.utcnow() - start) < datetime.timedelta(seconds=5):
            # Only allow 10 actions every 3 seconds; over a 5 second window that is 20 actions
if rate_limit('rate-limit-test', actions_allowed=10, how_often=3):
rate_limit_count += 1
iteration_count += 1
self.assertEqual(rate_limit_count, 20)
self.assertGreater(iteration_count, 20)
def test_domain_rate_limit(self):
rate_limiter = DomainRateLimiter('rate-limit-domain-', 10, 3)
domains = ('d1', 'd2')
domain_counts = {domain: 0 for domain in domains}
start = datetime.datetime.utcnow()
iteration_count = 0
while (datetime.datetime.utcnow() - start) < datetime.timedelta(seconds=5):
            # Only allow 10 actions every 3 seconds; over a 5 second window that is 20 actions
for domain in domains:
if rate_limiter.can_perform_action(domain):
domain_counts[domain] += 1
iteration_count += 1
for domain in domains:
self.assertEqual(domain_counts[domain], 20)
self.assertGreater(iteration_count, 20)
|
[
"gcapalbo@dimagi.com"
] |
gcapalbo@dimagi.com
|
555fc6f9037745f8d467cab753ff6fb8e6cb83c3
|
12f43487042025b5d27a1fba104bf1a7ce1f8cee
|
/src/single_sided_node_v1.py
|
f5027365074a79801182b0c123a7d54d0e967595
|
[] |
no_license
|
goerz-research/trajoct
|
e6c22a591fea98c3e287d135265a532001423fa9
|
601ef68465dff77552838c38bcbdfef510325289
|
refs/heads/master
| 2021-09-16T10:50:07.023190
| 2018-06-19T18:24:27
| 2018-06-19T18:24:27
| 79,431,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
"""Description of nodes consisting of a single-sided cavity with an atom
inside"""
import sympy
from sympy import symbols, sqrt
from qnet.algebra.hilbert_space_algebra import LocalSpace
from qnet.algebra.operator_algebra import Destroy, LocalSigma
from qnet.algebra.circuit_algebra import SLH, identity_matrix
def dagger(op):
return op.adjoint()
def syms_ops(node_index, n_cavity):
"""Define symbols and operators for a single node, required to write the
SLH for a single node"""
HilAtom = LocalSpace('q%d' % int(node_index), basis=('g', 'e'),
order_index=(2*node_index))
HilCavity = LocalSpace('c%d' % int(node_index), dimension=n_cavity,
order_index=(2*node_index+1))
Sym = {}
Sym['Delta'] = symbols(r'Delta_%s' % node_index, real=True)
Sym['g'] = symbols(r'g_%s' % node_index, positive=True)
Sym['Omega'] = symbols(r'Omega_%s' % node_index)
Sym['I'] = sympy.I
Sym['kappa'] = symbols(r'kappa', positive=True)
Op = {}
Op['a'] = Destroy(hs=HilCavity)
Op['|g><g|'] = LocalSigma('g', 'g', hs=HilAtom)
Op['|e><e|'] = LocalSigma('e', 'e', hs=HilAtom)
Op['|e><g|'] = LocalSigma('e', 'g', hs=HilAtom)
return Sym, Op
def node_hamiltonian(Sym, Op):
"""Symbolic Hamiltonian for a single node, in the RWA"""
# Symbols
Δ, g, Ω, I = (Sym['Delta'], Sym['g'], Sym['Omega'], Sym['I'])
δ = g**2 / Δ
# Cavity operators
Op_a = Op['a']; Op_a_dag = dagger(Op_a); Op_n = Op_a_dag * Op_a
# Qubit operators
Op_gg = Op['|g><g|']; Op_eg = Op['|e><g|']; Op_ge = dagger(Op_eg)
# Hamiltonian
H = -δ * Op_n + (g**2/Δ) * Op_n * Op_gg \
-I * (g / (2*Δ)) * Ω * (Op_eg*Op_a - Op_ge*Op_a_dag)
return H
def node_slh(node_index, n_cavity):
"""SLH description for a single node with the given `node_index` (which
will become the subscript in all symbols) and `n_cavity` number of levels
for the cavity
"""
Sym, Op = syms_ops(node_index, n_cavity)
S = identity_matrix(1)
κ = Sym['kappa']
L = [sqrt(2 * κ) * Op['a'], ]
H = node_hamiltonian(Sym, Op)
return SLH(S, L, H)
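# Hypothetical usage sketch (not part of the original file): build the SLH
# model for node 1 with a three-level cavity.
if __name__ == "__main__":
    slh = node_slh(1, n_cavity=3)  # S, L and H for one single-sided cavity node
    print(slh)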
|
[
"goerz@stanford.edu"
] |
goerz@stanford.edu
|
8f7e338e2a4ee08be8821b94e9b121b3d4183900
|
1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e
|
/xcp2k/classes/_program_run_info48.py
|
ad7fd8d53c9700a84b6260b359b618ea1c560e29
|
[] |
no_license
|
Roolthasiva/xcp2k
|
66b2f30ebeae1a946b81f71d22f97ea4076e11dc
|
fc3b5885503c6f6dc549efeb4f89f61c8b6b8242
|
refs/heads/master
| 2022-12-23T06:03:14.033521
| 2020-10-07T08:01:48
| 2020-10-07T08:01:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
from xcp2k.inputsection import InputSection
from xcp2k.classes._each400 import _each400
class _program_run_info48(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each400()
self._name = "PROGRAM_RUN_INFO"
self._keywords = {'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Filename': 'FILENAME', 'Log_print_key': 'LOG_PRINT_KEY'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ecf84eae3c1133e0064c0034147e1be38ac43df8
|
3e24611b7315b5ad588b2128570f1341b9c968e8
|
/pacbiolib/pacbio/pythonpkgs/pbfalcon/lib/python2.7/site-packages/pbfalcon/cli/hgap_run.py
|
39e6bd872b1c9ab4cffaeaca7d0be2a224123c6d
|
[
"BSD-2-Clause"
] |
permissive
|
bioCKO/lpp_Script
|
dc327be88c7d12243e25557f7da68d963917aa90
|
0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2
|
refs/heads/master
| 2022-02-27T12:35:05.979231
| 2019-08-27T05:56:33
| 2019-08-27T05:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
from falcon_polish.pypeflow import hgap
import argparse
import sys
def main(argv=sys.argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logging',
help='.ini or .json config file for Python logging module')
parser.add_argument('config',
help='.ini or .json of HGAP config. Available sections: "general", "hgap", "falcon", "pbsmrtpipe", "blasr", "quiver", ...')
args = parser.parse_args(argv[1:])
return hgap.run(args.config, args.logging)
if __name__ == "__main__":
main(sys.argv)
|
[
"409511038@qq.com"
] |
409511038@qq.com
|
cffe0a59e77bcf26414b072c0e182159fac32345
|
82948269c85dd0cce4f1337f75b28b5edf6d81ef
|
/eval.py
|
6b58ef22874feb9bea385f8a78b0cae4bdc90d21
|
[] |
no_license
|
zhangwenhao123/Underwater-Color-Correction
|
291c5f7131855832d58ddd6a02fd0a334c3f13d3
|
b25bd697bfd206afb9b3d1ad58d4ff4dfc850b2e
|
refs/heads/master
| 2021-05-11T03:43:07.330895
| 2018-01-17T22:48:46
| 2018-01-17T22:48:46
| 117,921,125
| 0
| 0
| null | 2018-01-18T02:33:36
| 2018-01-18T02:33:36
| null |
UTF-8
|
Python
| false
| false
| 4,072
|
py
|
'''
Evaluation File
'''
import cPickle as pickle
import tensorflow as tf
from scipy import misc
from tqdm import tqdm
import numpy as np
import argparse
import random
import ntpath
import sys
import os
import time
import time
import glob
import cPickle as pickle
from tqdm import tqdm
sys.path.insert(0, 'ops/')
sys.path.insert(0, 'nets/')
from tf_ops import *
import data_ops
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'You must provide an info.pkl file'
exit()
pkl_file = open(sys.argv[1], 'rb')
a = pickle.load(pkl_file)
LEARNING_RATE = a['LEARNING_RATE']
LOSS_METHOD = a['LOSS_METHOD']
BATCH_SIZE = a['BATCH_SIZE']
L1_WEIGHT = a['L1_WEIGHT']
IG_WEIGHT = a['IG_WEIGHT']
NETWORK = a['NETWORK']
EPOCHS = a['EPOCHS']
DATA = a['DATA']
    EXPERIMENT_DIR = 'checkpoints/LOSS_METHOD_'+LOSS_METHOD\
                     +'/NETWORK_'+NETWORK\
                     +'/L1_WEIGHT_'+str(L1_WEIGHT)\
                     +'/IG_WEIGHT_'+str(IG_WEIGHT)\
                     +'/DATA_'+DATA+'/'
    IMAGES_DIR = EXPERIMENT_DIR+'test_images/'
print
print 'Creating',IMAGES_DIR
try: os.makedirs(IMAGES_DIR)
except: pass
print
print 'LEARNING_RATE: ',LEARNING_RATE
print 'LOSS_METHOD: ',LOSS_METHOD
print 'BATCH_SIZE: ',BATCH_SIZE
print 'L1_WEIGHT: ',L1_WEIGHT
print 'IG_WEIGHT: ',IG_WEIGHT
print 'NETWORK: ',NETWORK
print 'EPOCHS: ',EPOCHS
print 'DATA: ',DATA
print
if NETWORK == 'pix2pix': from pix2pix import *
if NETWORK == 'resnet': from resnet import *
# global step that is saved with a model to keep track of how many steps/epochs
global_step = tf.Variable(0, name='global_step', trainable=False)
# underwater image
image_u = tf.placeholder(tf.float32, shape=(1, 256, 256, 3), name='image_u')
# generated corrected colors
gen_image = netG(image_u, LOSS_METHOD)
saver = tf.train.Saver(max_to_keep=1)
init = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())
sess = tf.Session()
sess.run(init)
ckpt = tf.train.get_checkpoint_state(EXPERIMENT_DIR)
if ckpt and ckpt.model_checkpoint_path:
print "Restoring previous model..."
try:
saver.restore(sess, ckpt.model_checkpoint_path)
print "Model restored"
    except Exception:
print "Could not restore model"
pass
step = int(sess.run(global_step))
# testing paths
'''
exts = ['*.jpg', '*.jpeg', '*.JPEG', '*.png']
test_paths = []
for ex in exts:
test_paths.extend(glob.glob('datasets/'+DATA+'/test/'+ex))
test_paths = np.asarray(test_paths)
'''
#test_paths = sorted(np.asarray(glob.glob('/mnt/data2/images/underwater/youtube/diving1/*.jpg')))
test_paths = sorted(np.asarray(glob.glob('/mnt/data1/videos/barbados/2018/images/*.png')))
IMAGES_DIR = '/mnt/data1/videos/barbados/2018/out_images/'
#random.shuffle(test_paths)
num_test = len(test_paths)
print 'num test:',num_test
print 'IMAGES_DIR:',IMAGES_DIR
c = 0
times = []
for img_path in tqdm(test_paths):
img_name = ntpath.basename(img_path)
img_name = img_name.split('.')[0]
batch_images = np.empty((1, 256, 256, 3), dtype=np.float32)
a_img = misc.imread(img_path).astype('float32')
a_img = misc.imresize(a_img, (256, 256, 3))
a_img = data_ops.preprocess(a_img)
batch_images[0, ...] = a_img
s = time.time()
gen_images = np.asarray(sess.run(gen_image, feed_dict={image_u:batch_images}))
tot = time.time()-s
times.append(tot)
for gen, real in zip(gen_images, batch_images):
#misc.imsave(IMAGES_DIR+str(step)+'_'+str(c)+'_real.png', real)
#misc.imsave(IMAGES_DIR+str(step)+'_'+str(c)+'_gen.png', gen)
misc.imsave(IMAGES_DIR+img_name+'_real.png', real)
misc.imsave(IMAGES_DIR+img_name+'_gen.png', gen)
c += 1
print
print 'average time:',np.mean(np.asarray(times))
print
|
[
"cameronfabbri@gmail.com"
] |
cameronfabbri@gmail.com
|
327694995f36b39033eb39821a28e85f2af31c2a
|
e7b7cc34f77c71e61aa0fa05bcc62f54fc2fc0e1
|
/BitManipulation/test_q136_single_number.py
|
42836c696458ba313f72ead4768f330aefb9e302
|
[] |
no_license
|
sevenhe716/LeetCode
|
41d2ef18f5cb317858c9b69d00bcccb743cbdf48
|
4a1747b6497305f3821612d9c358a6795b1690da
|
refs/heads/master
| 2020-03-16T16:12:27.461172
| 2019-04-22T13:27:54
| 2019-04-22T13:27:54
| 130,221,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
import unittest
from BitManipulation.q136_single_number import Solution
class TestSingleNumber(unittest.TestCase):
"""Test q136_single_number.py"""
def test_single_number(self):
s = Solution()
self.assertEqual(1, s.singleNumber([2, 2, 1]))
self.assertEqual(4, s.singleNumber([4, 1, 2, 1, 2]))
if __name__ == '__main__':
unittest.main()
|
[
"429134862@qq.com"
] |
429134862@qq.com
|
22844e0fe9f2ac64b52c240fca902ba6fa38396f
|
a2830d10e5bb5d559ea0b3c209cee46d25820ea6
|
/tests/integration/network/connect_nodes_test.py
|
3829da79ad9dea71a59cac4c52138105caceff9f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
vishalbelsare/PySyft
|
623e41c472a1e66bf3918fdb11399a6f20112fc0
|
fb04404fcfbef82fad1fb47407b35a24e9afb599
|
refs/heads/dev
| 2023-08-21T21:56:37.319567
| 2021-11-23T07:20:09
| 2021-11-23T07:20:09
| 214,707,883
| 0
| 0
|
Apache-2.0
| 2021-11-25T04:54:37
| 2019-10-12T20:03:01
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,321
|
py
|
# third party
import pytest
import requests
# syft absolute
import syft as sy
NETWORK_PORT = 9081
NETWORK_PUBLIC_HOST = f"docker-host:{NETWORK_PORT}"
DOMAIN1_PORT = 9082
DOMAIN2_PORT = 9083
NETWORK_VPN_IP = "100.64.0.1"
DOMAIN1_VPN_IP = "100.64.0.2"
DOMAIN2_VPN_IP = "100.64.0.3"
TEST_ROOT_EMAIL = "info@openmined.org"
TEST_ROOT_PASS = "changethis"
def join_to_network_python(
email: str, password: str, port: int, network_host: str
) -> None:
root_client = sy.login(email=email, password=password, port=port)
# test Syft API
root_client.join_network(host_or_ip=network_host)
response = root_client.vpn_status()
return response
def join_to_network_rest(
email: str, password: str, port: int, network_host: str
) -> None:
url = f"http://localhost:{port}/api/v1/login"
auth_response = requests.post(url, json={"email": email, "password": password})
auth = auth_response.json()
# test HTTP API
url = f"http://localhost:{port}/api/v1/vpn/join/{network_host}"
headers = {"Authorization": f"Bearer {auth['access_token']}"}
response = requests.post(url, headers=headers)
result = response.json()
return result
def run_network_tests(port: int, hostname: str, vpn_ip: str) -> None:
response = join_to_network_python(
email=TEST_ROOT_EMAIL,
password=TEST_ROOT_PASS,
port=port,
network_host=NETWORK_PUBLIC_HOST,
)
assert response["status"] == "ok"
host = response["host"]
assert host["ip"] == vpn_ip
assert host["hostname"] == hostname
assert host["os"] == "linux"
response = join_to_network_rest(
email=TEST_ROOT_EMAIL,
password=TEST_ROOT_PASS,
port=port,
network_host=NETWORK_PUBLIC_HOST,
)
assert response["status"] == "ok"
@pytest.mark.network
def test_connect_network_to_network() -> None:
run_network_tests(
port=NETWORK_PORT, hostname="test_network_1", vpn_ip=NETWORK_VPN_IP
)
@pytest.mark.network
def test_connect_domain1_to_network() -> None:
run_network_tests(
port=DOMAIN1_PORT, hostname="test_domain_1", vpn_ip=DOMAIN1_VPN_IP
)
@pytest.mark.network
def test_connect_domain2_to_network() -> None:
run_network_tests(
port=DOMAIN2_PORT, hostname="test_domain_2", vpn_ip=DOMAIN2_VPN_IP
)
|
[
"me@madhavajay.com"
] |
me@madhavajay.com
|
35a789b942ec3a960db601b55fe92fd6f20c6e1b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_triptych.py
|
326fed97b6911ff86e26d493fa295989fe50f35a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
# class header
class _TRIPTYCH():
def __init__(self,):
self.name = "TRIPTYCH"
self.definitions = [u'a piece of art made of three paintings connected to each other in a way that allows the two outer ones to fold in towards the larger central one: ']
self.parents = []
self.children = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0a8424d606394ea8ae293cf99d4325fb99aa7704
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/travelport/models/universal_record_import_service_port_type_service_output.py
|
785ca9abe0e122f6bfffeea1000df21f8bfa7744
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,555
|
py
|
from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.error_info_1 import ErrorInfo1
from travelport.models.universal_record_import_rsp import UniversalRecordImportRsp
__NAMESPACE__ = "http://www.travelport.com/service/air_v52_0"
@dataclass
class UniversalRecordImportServicePortTypeServiceOutput:
class Meta:
name = "Envelope"
namespace = "http://schemas.xmlsoap.org/soap/envelope/"
body: None | UniversalRecordImportServicePortTypeServiceOutput.Body = field(
default=None,
metadata={
"name": "Body",
"type": "Element",
}
)
@dataclass
class Body:
universal_record_import_rsp: None | UniversalRecordImportRsp = field(
default=None,
metadata={
"name": "UniversalRecordImportRsp",
"type": "Element",
"namespace": "http://www.travelport.com/schema/universal_v52_0",
}
)
fault: None | UniversalRecordImportServicePortTypeServiceOutput.Body.Fault = field(
default=None,
metadata={
"name": "Fault",
"type": "Element",
}
)
@dataclass
class Fault:
faultcode: None | str = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
faultstring: None | str = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
faultactor: None | str = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
detail: None | UniversalRecordImportServicePortTypeServiceOutput.Body.Fault.Detail = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
@dataclass
class Detail:
error_info: None | ErrorInfo1 = field(
default=None,
metadata={
"name": "ErrorInfo",
"type": "Element",
"namespace": "http://www.travelport.com/schema/common_v52_0",
}
)
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
dcf147953063c4991c197cbe6efdbcb60532bb65
|
7b3871759d61004217100ce1a858b1acd20c6166
|
/study/day3/使用list()函数将range()函数输出的值输出为列表.py
|
c996564c86e1de7062102d16ba6cff55f3bd5ef7
|
[] |
no_license
|
yidaiweiren/Python
|
74bcecfe32cef25e3f5692b3a3ebf1309cbe8e00
|
986a51cc59f0ffa90c967b62a3d729bb034c273d
|
refs/heads/master
| 2021-07-25T00:27:52.970745
| 2020-06-05T07:36:35
| 2020-06-05T07:36:35
| 183,752,758
| 3
| 1
| null | 2019-04-28T03:35:01
| 2019-04-27T09:20:56
|
Python
|
UTF-8
|
Python
| false
| false
| 235
|
py
|
# Use the list() function to turn the values produced by range() into a list
# Approach:
# range() produces a sequence of numbers
# pass the values produced by range() as the argument to list()
num = list(range(1, 9))
print(num)
# Result:
'''
[1, 2, 3, 4, 5, 6, 7, 8]
'''
|
[
"2577110429@qq.com"
] |
2577110429@qq.com
|
1376040d7ea44397eb2be58d3c4c49bbb5b2748a
|
425b5719ecf6b40bf3de94ddf6e0cc9cf72717b7
|
/app/events/regions.py
|
176df6e4c2fdb4eb0f39440397ba757453882183
|
[
"MIT"
] |
permissive
|
zerorock1312/lt-maker-master
|
3b9b2e7245215936018601432a98915c40f3937d
|
82f733683f9dba763a5de8567c41fd7cbcfb0173
|
refs/heads/main
| 2023-06-04T10:28:43.931841
| 2021-06-18T06:03:40
| 2021-06-18T06:03:40
| 378,050,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
from app.utilities.data import Prefab
region_types = ['normal', 'status', 'event', 'formation']
class Region(Prefab):
def __init__(self, nid):
self.nid = nid
self.region_type = 'normal'
self.position = None
self.size = [1, 1]
self.sub_nid = None
self.condition = 'True'
self.only_once = False
@property
def area(self):
return self.size[0] * self.size[1]
@property
def center(self) -> tuple:
if self.position:
x = int(self.position[0] + self.size[0] // 2)
y = int(self.position[1] + self.size[1] // 2)
return x, y
else:
return None
def contains(self, pos: tuple) -> bool:
x, y = pos
if self.position:
return self.position[0] <= x < self.position[0] + self.size[0] and \
self.position[1] <= y < self.position[1] + self.size[1]
else:
return False
def fuzzy_contains(self, pos: tuple) -> bool:
x, y = pos
fuzz = 0.4
if self.position:
return self.position[0] - fuzz <= x < self.position[0] + self.size[0] + fuzz and \
self.position[1] - fuzz <= y < self.position[1] + self.size[1] + fuzz
else:
return False
def get_all_positions(self):
if self.position:
positions = []
for i in range(self.position[0], self.position[0] + self.size[0]):
for j in range(self.position[1], self.position[1] + self.size[1]):
positions.append((i, j))
return positions
else:
return []
@classmethod
def default(cls):
return cls('None')
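# Hypothetical usage sketch (not part of the original file): a 2x2 region
# anchored at (2, 3) contains (3, 4) but not (4, 4), because containment is
# half-open on the far edges.
if __name__ == "__main__":
    r = Region('demo')
    r.position = (2, 3)
    r.size = [2, 2]
    print(r.contains((3, 4)), r.contains((4, 4)))  # True False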
|
[
"85828552+zerorock1312@users.noreply.github.com"
] |
85828552+zerorock1312@users.noreply.github.com
|
d1a3507e16a5cbea29be05f612b36e42c86cbd03
|
5e6bb81b207f3306bca3a2412dcc86525ff09b51
|
/Django_test01/mysql_test2000.py
|
126dffb3de527794b524ceaf3e3241e6463f044c
|
[] |
no_license
|
ssk1987/FullStackExerciseLibrary
|
f16ad4a0ab2ce6864d00905738db0832a8e916a1
|
e050bffce3d8497b47980aab30ea99409f61856b
|
refs/heads/master
| 2021-06-03T02:40:06.840380
| 2021-03-27T11:37:22
| 2021-03-27T11:37:22
| 254,331,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
import pymysql
# connect to the database
db = pymysql.connect(host='127.0.0.1',
                     port=3306,
                     user='root',
                     password='12345678',
                     database='country1',
                     charset='utf8')
# get a cursor
cur = db.cursor()
# build the data to insert
data_list = []
for x in range(200000):
    name = 'Py87_%s' % x
    data_list.append(name)
# insert statement
# a single packet is capped: max allowed_packet is 10586
# max_stmt_length = 1024000 caps the string length of the ins SQL statement
ins = 'insert into students(name) values(%s)'
# bulk-insert the data
# every individual insert costs one disk/network IO, so batching improves throughput
cur.executemany(ins, data_list)
# commit
db.commit()
# close
cur.close()
db.close()
|
[
"10293665@qq.com"
] |
10293665@qq.com
|
c220c97fed107071513fbfb21f2e2b337a46b158
|
d03a874a5ba8303cdcedf88350bb3cae2c98244a
|
/cifar10_cnn.py
|
6a0aebaf8ea16da8087269b1e30966773f223d2c
|
[] |
no_license
|
yaroslavvb/whitening
|
f8b24624d751c7b8b6245eb37859ba527b850814
|
7d071c25c8a9ff9cc624b608013f097646ca3b5e
|
refs/heads/master
| 2021-07-09T16:13:44.707126
| 2017-10-10T22:07:49
| 2017-10-10T22:07:49
| 90,409,782
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,886
|
py
|
'''
CIFAR-10 example from https://github.com/fchollet/keras/blob/master/examples/cifar10_cnn.py
Now with weight normalization. Lines 64 and 69 contain the changes w.r.t. original.
'''
from __future__ import print_function
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import optimizers
import util as u
import sys
import numpy as np
if len(sys.argv)<2:
assert False
if sys.argv[1]=='sgd':
prefix='keras_sgd'
optimizer='sgd'
elif sys.argv[1]=='sgd_wn':
prefix='keras_sgd_wn'
optimizer='sgd_wn'
elif sys.argv[1]=='adam':
prefix='keras_adam'
optimizer='adam'
elif sys.argv[1]=='adam_wn':
prefix='keras_adam_wn'
optimizer='adam_wn'
else:
assert False
batch_size = 5000
nb_classes = 10
nb_epoch = 10
data_augmentation = True
# input image dimensions
img_rows, img_cols = 32, 32
# the CIFAR10 images are RGB
img_channels = 3
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original). EDIT: now with weight normalization, so slightly more original ;-)
from weightnorm import SGDWithWeightnorm
from weightnorm import AdamWithWeightnorm
sgd_wn = SGDWithWeightnorm(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
adam = optimizers.Adam()
adam_wn = AdamWithWeightnorm()
if optimizer == 'sgd':
optimizer=sgd
elif optimizer == 'sgd_wn':
optimizer=sgd_wn
elif optimizer == 'adam':
optimizer=adam
elif optimizer == 'adam_wn':
optimizer=adam_wn
else:
assert False
model.compile(loss='categorical_crossentropy',optimizer=optimizer,
metrics=['accuracy'])
# data based initialization of parameters
from weightnorm import data_based_init
data_based_init(model, X_train[:100])
if not data_augmentation:
print('Not using data augmentation.')
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_train)
# fit the model on the batches generated by datagen.flow()
result = model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test))
acc_hist = np.asarray(result.history['acc'])
u.dump(acc_hist, "%s_losses.csv"%(prefix,))
|
[
"yaroslavvb@gmail.com"
] |
yaroslavvb@gmail.com
|
550f6cd08f39601e6c9a1a69dd90d5f9e88c3746
|
c73beb04d101ca8d98c9126b1c47b4f19cc35066
|
/week1/single_process.py
|
36f42dc91f0d31ea01767b8f1165e0fd3deaa8a5
|
[] |
no_license
|
fywest/python
|
a5ecf62e1f8cdf59c936da81b478c371f169aec4
|
cd97438679d8e129b3cb75d76226b16e7e7850ac
|
refs/heads/master
| 2022-12-13T06:15:04.021492
| 2019-05-28T19:21:18
| 2019-05-28T19:21:18
| 130,403,136
| 0
| 0
| null | 2022-12-08T05:08:55
| 2018-04-20T19:02:57
|
Python
|
UTF-8
|
Python
| false
| false
| 249
|
py
|
import time
def io_task():
time.sleep(1)
def main():
start_time=time.time()
for i in range(5):
io_task()
end_time=time.time()
print('it takes: {:.2f}s'.format(end_time-start_time))
if __name__=='__main__':
main()
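# Expected behaviour: five sequential 1-second io_task() calls, so the script
# prints roughly "it takes: 5.00s".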
|
[
"fywest2109@hotmail.com"
] |
fywest2109@hotmail.com
|
a6c26522a2da68de30a9caebeb0d89d3cde0854f
|
82db461036ffb2adbf0424a6f0575cd9d24b48a8
|
/library/signal/demo_signal.py
|
c59427f814a4ca24d3efc82da3ddfcf5469043dd
|
[] |
no_license
|
webclinic017/option_pdt
|
fdc559f02cc529b54278e90e04170713fe93684f
|
dd302c6b2661e26dbfcbea0384b99e85ae9584e1
|
refs/heads/master
| 2023-03-24T10:43:35.998775
| 2021-03-19T14:08:38
| 2021-03-19T14:08:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
from base.signal_base import SignalBase
class Signal(SignalBase):
def __init__(self, spot_instrument, future_instrument):
super().__init__(spot_instrument, future_instrument)
self.spot_instrument = spot_instrument
self.future_instrument = future_instrument
self.spot_price = float('nan')
self.future_price = float('nan')
self.subscription_list = [f'{_}|1s' for _ in [spot_instrument, future_instrument]]
def on_market_data_1s_ready(self, data):
        instrument = '|'.join([data[k] for k in ('exchange', 'symbol', 'contract_type')])
if instrument == self.spot_instrument:
self.spot_price = data['metadata']['mid']
else:
self.future_price = data['metadata']['mid']
self.value = self.future_price / self.spot_price - 1
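        # Worked example: a spot mid of 100.0 and a future mid of 101.0 give
        # self.value = 101.0 / 100.0 - 1 = 0.01, i.e. a 1% basis.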
def from_hist_data(self, mds):
spot_orderbook = mds[self.subscription_list[0]]
fut_orderbook = mds[self.subscription_list[1]]
df = spot_orderbook[['local_timestamp']].copy()
df['value'] = fut_orderbook['mid'] / spot_orderbook['mid'] - 1
return df
|
[
"noreply@github.com"
] |
webclinic017.noreply@github.com
|
4364d3d0e4e45b61777839885827ff630e4e8965
|
353def93fa77384ee3a5e3de98cfed318c480634
|
/.history/week01/hoework01/gettop10frommaoyam01_20200625232557.py
|
6eb6ccba3f51b68abc25670be539f7e1cf36cf46
|
[] |
no_license
|
ydbB/Python001-class01
|
d680abc3ea1ccaeb610751e3488421417d381156
|
ad80037ccfc68d39125fa94d2747ab7394ac1be8
|
refs/heads/master
| 2022-11-25T11:27:45.077139
| 2020-07-19T12:35:12
| 2020-07-19T12:35:12
| 272,783,233
| 0
| 0
| null | 2020-06-16T18:28:15
| 2020-06-16T18:28:15
| null |
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
# Use the requests and bs4 libraries to scrape the names, genres and release dates of the Maoyan top-10 movies, and save them to a CSV file in the utf-8 charset
import requests
from bs4 import BeautifulSoup as bs
maoyanUrl = "https://maoyan.com/board/4"
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593075273346.1593075275703.6; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; mojo-session-id={"id":"dd5ec1780230b10b3b01a18882424620","time":1593078373432}; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593078727; __mta=251934006.1593072991075.1593075275703.1593078726963.7; mojo-trace-id=3; _lxsdk_s=172eade6a22-b72-c5-308%7C%7C6',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
}
response = requests.get(maoyanUrl,headers=header)
response.encoding = 'utf-8'
bs_info = bs(response.text,"html.parser")
# print(response.text)
for tags in bs_info.find_all('div', attrs={'id': 'container'}):
print(tags)
for tag in tags.find_all('a',):
print(tag.get('href'))
print(tag.get('title'))
|
[
"31039587+ydbB@users.noreply.github.com"
] |
31039587+ydbB@users.noreply.github.com
|
15c9a7f22850db515e4e18371917447d643d5ef9
|
8be39fd741cbbb08439433188ca1bc59ee5cf11f
|
/data_handler/logistic_parser.py
|
e6aef4d466d44f479d23b3cf53d7c015e2c3c050
|
[
"MIT"
] |
permissive
|
tpimentelms/fast-conversational-banking
|
ca1e699261f989f3b535a50782062c000985ba1e
|
b9d3ddfe3adb78522fafab91c2d20495db063dda
|
refs/heads/master
| 2021-03-16T21:30:51.253223
| 2018-02-28T20:30:15
| 2018-02-28T20:30:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
from .data_parser import DataParser
class LogisticDataParser(DataParser):
def __init__(self, max_len, cuda=True, quiet=True, remove_brackets=False):
super(LogisticDataParser, self).__init__(max_len, cuda=cuda, quiet=quiet)
self.remove_brackets = remove_brackets
def normalize_string(self, s):
if self.remove_brackets:
s = [x for x in s if x not in ['(', ')', ',']]
return s
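# Hypothetical example of the effect of `remove_brackets` (constructor
# arguments are illustrative):
#   parser = LogisticDataParser(max_len=50, cuda=False, remove_brackets=True)
#   parser.normalize_string(['f', '(', 'x', ',', 'y', ')'])  ->  ['f', 'x', 'y']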
|
[
"tiagopms@gmail.com"
] |
tiagopms@gmail.com
|
58fc0eca94dc7087f6281521d213dd735c29351d
|
30cffb7452220c2ac2961dd2e0f42e3b359a59c0
|
/simscale_sdk/models/one_of_solid_simulation_control_pseudo_time_stepping.py
|
fa0feb65ab655b8e7c0d7bac2d73c1a47ecd1ee9
|
[
"MIT"
] |
permissive
|
vpurcarea/simscale-python-sdk
|
0bf892d8824f8d4599caa0f345d5ba28e038f5eb
|
6f2d12b2d21142bd854042c0fb402c2c797629e4
|
refs/heads/master
| 2023-03-14T04:31:06.226337
| 2021-03-03T16:20:01
| 2021-03-03T16:20:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,107
|
py
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class OneOfSolidSimulationControlPseudoTimeStepping(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'static_timesteps': 'DimensionalTime',
'simulation_intervals': 'DimensionalTime',
'timestep_length': 'RestrictedDimensionalFunctionTime'
}
attribute_map = {
'type': 'type',
'static_timesteps': 'staticTimesteps',
'simulation_intervals': 'simulationIntervals',
'timestep_length': 'timestepLength'
}
discriminator_value_class_map = {
'SINGLE_STEP': 'SingleStepPseudoTimeStepping',
'STEPPING_LIST_V18': 'SteppingListPseudoTimeStepping'
}
def __init__(self, type='STEPPING_LIST_V18', static_timesteps=None, simulation_intervals=None, timestep_length=None, local_vars_configuration=None): # noqa: E501
"""OneOfSolidSimulationControlPseudoTimeStepping - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._static_timesteps = None
self._simulation_intervals = None
self._timestep_length = None
self.discriminator = 'type'
self.type = type
if static_timesteps is not None:
self.static_timesteps = static_timesteps
if simulation_intervals is not None:
self.simulation_intervals = simulation_intervals
if timestep_length is not None:
self.timestep_length = timestep_length
@property
def type(self):
"""Gets the type of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:return: The type of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this OneOfSolidSimulationControlPseudoTimeStepping.
:param type: The type of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def static_timesteps(self):
"""Gets the static_timesteps of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:return: The static_timesteps of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:rtype: DimensionalTime
"""
return self._static_timesteps
@static_timesteps.setter
def static_timesteps(self, static_timesteps):
"""Sets the static_timesteps of this OneOfSolidSimulationControlPseudoTimeStepping.
:param static_timesteps: The static_timesteps of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:type: DimensionalTime
"""
self._static_timesteps = static_timesteps
@property
def simulation_intervals(self):
"""Gets the simulation_intervals of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:return: The simulation_intervals of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:rtype: DimensionalTime
"""
return self._simulation_intervals
@simulation_intervals.setter
def simulation_intervals(self, simulation_intervals):
"""Sets the simulation_intervals of this OneOfSolidSimulationControlPseudoTimeStepping.
:param simulation_intervals: The simulation_intervals of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:type: DimensionalTime
"""
self._simulation_intervals = simulation_intervals
@property
def timestep_length(self):
"""Gets the timestep_length of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:return: The timestep_length of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:rtype: RestrictedDimensionalFunctionTime
"""
return self._timestep_length
@timestep_length.setter
def timestep_length(self, timestep_length):
"""Sets the timestep_length of this OneOfSolidSimulationControlPseudoTimeStepping.
:param timestep_length: The timestep_length of this OneOfSolidSimulationControlPseudoTimeStepping. # noqa: E501
:type: RestrictedDimensionalFunctionTime
"""
self._timestep_length = timestep_length
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OneOfSolidSimulationControlPseudoTimeStepping):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OneOfSolidSimulationControlPseudoTimeStepping):
return True
return self.to_dict() != other.to_dict()
|
[
"simscale"
] |
simscale
|
b42b8f6ed64adddc457f57db5aa9a253495901dd
|
084a13b6524e21914826e842eeefefd09570a970
|
/experiments/atari_hard/montezuma_revenge/ppo_cnd_102_2.py
|
833f9590c550cdf94ff45ade8163c8d0dfd4556c
|
[
"MIT"
] |
permissive
|
michalnand/reinforcement_learning
|
28aa0e2c92b6112cf366eff0e0d6a78b9a56e94f
|
01635014a37a4c871766b4cdd2caaa26a0c2d8cc
|
refs/heads/main
| 2023-06-01T10:27:36.601631
| 2023-02-12T19:46:01
| 2023-02-12T19:46:01
| 217,841,101
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
import time
import torch
import RLAgents
import models.ppo_cnd_102_2.src.model_ppo as ModelPPO
import models.ppo_cnd_102_2.src.model_cnd_target as ModelCNDTarget
import models.ppo_cnd_102_2.src.model_cnd as ModelCND
import models.ppo_cnd_102_2.src.config as Config
#torch.cuda.set_device("cuda:0")
#print("running on ", torch.cuda.get_device_name())
path = "models/ppo_cnd_102_2/"
config = Config.Config()
#config.envs_count = 1
envs = RLAgents.MultiEnvParallelOptimised("MontezumaRevengeNoFrameskip-v4", RLAgents.WrapperMontezuma, config.envs_count)
#envs = RLAgents.MultiEnvSeq("MontezumaRevengeNoFrameskip-v4", RLAgents.WrapperMontezuma, config.envs_count)
#envs = RLAgents.MultiEnvSeq("MontezumaRevengeNoFrameskip-v4", RLAgents.WrapperMontezumaVideo, config.envs_count)
agent = RLAgents.AgentPPOCND(envs, ModelPPO, ModelCNDTarget, ModelCND, config)
max_iterations = 500000
training = RLAgents.TrainingIterations(envs, agent, max_iterations, path, 128)
training.run()
'''
agent.load(path)
agent.disable_training()
while True:
reward, done, _ = agent.main()
envs.render(0)
#time.sleep(0.01)
'''
|
[
"michal.nand@gmail.com"
] |
michal.nand@gmail.com
|
452809658c1a86e94a421c66c5acc6bee7d001ae
|
d86c52f4098fd9c1a102c2d3f5630556e0610fa2
|
/fitle/myenv/Lib/site-packages/django/db/migrations/operations/base.py
|
0cd0ff38a0d70b87f180e2140d1dff4c187e2c1a
|
[] |
no_license
|
makadama/bitbucket
|
24f05c4946168ed15d4f56bfdc45fd6c0774e0f2
|
cabfd551b92fe1af6d9d14ab9eb3d9974b64aa79
|
refs/heads/master
| 2023-06-19T19:04:03.894599
| 2021-07-15T12:10:39
| 2021-07-15T12:10:39
| 385,203,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:fabe9ce10ec2202053f79ef81fbddd479a6ff7812ab28cd2e4fd9a357f15c939
size 5016
|
[
"adamamakhtarsow@gmail.com"
] |
adamamakhtarsow@gmail.com
|
076b7d58c5592f6388a9ee56ad93f451e1d154e1
|
cdfb77f5fb782ed8c731c6789ba154fefb34b830
|
/Seção 4/tipo_booleano.py
|
537bf355e9805854ef01d78c4a063d4455a7b447
|
[] |
no_license
|
Yuri-Santiago/curso-udemy-python
|
7dc83e0ade45e8d959ce12b81098a13617e0a7ca
|
2af0ddad01b08f6afd0bfe35648212d4ee49f52b
|
refs/heads/master
| 2023-04-21T07:11:35.594753
| 2021-05-18T05:14:56
| 2021-05-18T05:14:56
| 350,412,085
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 787
|
py
|
"""
Tio Booleano
Álgebra Booleana, criada por George Boole
2 constantes: Verdadeiro ou Falso
True -> Verdadeiro
False -> Falso
A primeira letra precisa ser maiúscula
"""
falso = False
verdadeiro = True
print(falso)
print(verdadeiro)
print(type(verdadeiro))
# Operations
# Negation (not): flips the current value to its opposite
print(not falso)
print(not verdadeiro)
# Or (or): a binary operation where at least one of the values must be true for the result to be true
print(falso or verdadeiro)
# And (and): a binary operation where both values must be true for the result to be true
print(falso and verdadeiro)
print(not falso and verdadeiro)
# You can compare values and variables to get a boolean result
print(5 < 6)
print(3 > 4)
|
[
"yurimateussantiago@gmail.com"
] |
yurimateussantiago@gmail.com
|
0c96381cfe7098294ceee4776eec3458bc497a29
|
ede96590eee4880ff83d1f1d8db5229e92c6e919
|
/leasing/migrations/0042_contract_sign_fields.py
|
84877db9dd92cea1360228eff5704d0bf76a4a3d
|
[
"MIT"
] |
permissive
|
igordavydsson/mvj
|
a4c5b39e7be9f95e15a2e906ad61b98611998063
|
b467c6229f9d458d56b66f628b0841adb67a2970
|
refs/heads/master
| 2020-04-22T20:42:06.650182
| 2019-02-12T13:50:57
| 2019-02-12T13:50:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
# Generated by Django 2.1.5 on 2019-02-12 07:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leasing', '0041_add_collateral'),
]
operations = [
migrations.AddField(
model_name='contract',
name='first_call_sent',
field=models.DateField(blank=True, null=True, verbose_name='First call sent'),
),
migrations.AddField(
model_name='contract',
name='second_call_sent',
field=models.DateField(blank=True, null=True, verbose_name='Second call sent'),
),
migrations.AddField(
model_name='contract',
name='sign_by_date',
field=models.DateField(blank=True, null=True, verbose_name='Sign by date'),
),
migrations.AddField(
model_name='contract',
name='third_call_sent',
field=models.DateField(blank=True, null=True, verbose_name='Third call sent'),
),
]
|
[
"mikko.keskinen@anders.fi"
] |
mikko.keskinen@anders.fi
|
64c7384fef7e4dd6391e4f314b57c6de80fca160
|
321116aad628f819e13e4e033d819b1bc8ed5c78
|
/deploy/dj_scaffold.wsgi
|
a88cd18489656640d0a1116a2183f5c874e0fcc2
|
[] |
no_license
|
sakishum/timeline-site
|
ccdd9fb34d69e3ed931d9e984934b885cef02a24
|
59981dac0eaa09aed5413dbd1932d3294be958d8
|
refs/heads/master
| 2021-04-28T07:16:11.941592
| 2017-03-06T02:48:20
| 2017-03-06T02:48:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
wsgi
|
import os
import site
from dj_scaffold.env import add_site_dir
HERE = os.path.dirname(__file__)
ROOT_PATH = os.path.abspath(os.path.join(HERE, '../'))
ALLDIRS = [os.path.join(ROOT_PATH, 'env/lib/python2.7/site-packages'), os.path.join(ROOT_PATH, 'sites')]
add_site_dir(ALLDIRS)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
|
[
"zbirder@gmail.com"
] |
zbirder@gmail.com
|
b18b81bc61a1aca39d22a6fef58527e12ba9bee6
|
24eca673c9b7b6257847bb7aa154994f8e80295b
|
/pyguide/__init__.py
|
efc39bb2b1eddc355649a945707ac4dafdcbcae0
|
[] |
no_license
|
MacHu-GWU/pyrabbit-python-advance-guide-project
|
47916c7cd498bed77e56e15ec948be0086e058f0
|
6cdc081064c53b77631d3406501bfda7450963b4
|
refs/heads/master
| 2020-04-12T12:26:53.538471
| 2015-12-30T21:58:58
| 2015-12-30T21:58:58
| 42,208,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description:
This is a project documents everything the author learned from a newbie becoming
a Senior Python developer. This document is written mainly in Chinese, but I am
trying to do the best to make it bilingual.
Because I believe python3 is the future of python, so everything is tested in
python33.
说明:
pyrabbit-python-advance-guide-project 是一个将笔者从一个无任何计算机背景的菜鸟
到成长为一个高级Python开发者 - 大数据科学家的路上, 所学到的所有Python的知识。
经过笔者的思考, 和精心挑选的可执行的代码例子, 并将整个文档网站化, 可搜索化的一个
项目。
本项目基于Python33社区
项目主要分为下面5大部分:
- :mod:`cookbook <pyguide.cookbook>`: 一些有关Python的深入探讨
- :mod:`newbie <pyguide.newbie>`: 从零开始学Python
- :mod:`ref <pyguide.ref>`: Python官方参考文档中的精华总结
- :mod:`stdlib <pyguide.stdlib>`: Python标准库的摘要
- :mod:`package <pyguide.package>`: 第三方库相关文档
"""
|
[
"husanhe@gmail.com"
] |
husanhe@gmail.com
|
b79edf89c8828024f571709b514601ebae22c9db
|
c2fa3b814a7f56ad804dffc767fc54f5099d60f8
|
/dataset_processing/contours_gilbert_256_sparse_nonRandomShear.py
|
73feb4a72e2d84d541d19dd708bfc9b3a1a8aa08
|
[] |
no_license
|
dmely/contextual_circuit_bp
|
223b602dbabbe8f8091fbb9106f3103bd5e1dcba
|
a277bc3146beaa4e3edd2134fc9fb8d3388a6013
|
refs/heads/master
| 2021-10-07T19:04:14.509951
| 2018-03-31T17:10:33
| 2018-03-31T17:10:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,506
|
py
|
import os
import re
import numpy as np
import tensorflow as tf
from glob import glob
from config import Config
from ops import tf_fun
class data_processing(object):
def __init__(self):
self.name = 'contours_gilbert_256_sparse_nonRandomShear'
self.im_extension = '.png'
self.images_dir = 'images'
self.label_regex = r'(?<=length)\d+'
self.config = Config()
self.im_size = [256, 256, 3] # 600, 600
self.model_input_image_size = [256, 256, 3] # [107, 160, 3]
self.max_ims = 0
self.output_size = [1]
self.label_size = self.output_size
self.default_loss_function = 'cce'
self.score_metric = 'accuracy'
self.store_z = False
self.normalize_im = True
self.shuffle = True
self.input_normalization = 'zscore'
self.preprocess = [''] # ['resize_nn']
self.folds = {
'train': 'train',
'val': 'val'
}
self.cv_split = 0.9
self.cv_balance = True
self.targets = {
'image': tf_fun.bytes_feature,
'label': tf_fun.int64_feature
}
self.tf_dict = {
'image': tf_fun.fixed_len_feature(dtype='string'),
'label': tf_fun.fixed_len_feature(dtype='int64')
}
self.tf_reader = {
'image': {
'dtype': tf.float32,
'reshape': self.im_size
},
'label': {
'dtype': tf.int64,
'reshape': self.output_size
}
}
def get_data(self):
"""Get the names of files."""
files = np.asarray(
glob(
os.path.join(
self.config.data_root,
self.name,
'*%s' % self.im_extension)))
labels = np.asarray(
[int(re.search(self.label_regex, x).group()) for x in files])
labels = (labels > 1).astype(np.int32)
ul, lc = np.unique(labels, return_counts=True)
include_count = np.min(lc)
        if self.max_ims:
            include_count = np.min([include_count, self.max_ims])
# Trim files and labels to include_count
pos_idx = np.where(labels == 1)[0][:include_count]
neg_idx = np.where(labels == 0)[0][:include_count]
# Create CV folds
cv_files, cv_labels = {}, {}
cv_files[self.folds['train']] = {}
cv_files[self.folds['val']] = {}
prev_cv = 0
        for k, v in self.folds.items():  # items() works on both Python 2 and 3
if k == self.folds['train']:
cv_split = int(include_count * self.cv_split)
elif k == self.folds['val']:
cv_split = int(include_count * (1 - self.cv_split))
else:
raise NotImplementedError
if prev_cv:
cv_split += prev_cv
cv_inds = np.arange(prev_cv, cv_split)
it_files = np.concatenate((
files[pos_idx][cv_inds],
files[neg_idx][cv_inds]))
it_labels = np.concatenate((
labels[pos_idx][cv_inds],
labels[neg_idx][cv_inds]))
if self.shuffle:
shuffle_idx = np.random.permutation(len(it_files))
it_files = it_files[shuffle_idx]
it_labels = it_labels[shuffle_idx]
cv_files[k] = it_files
cv_labels[k] = it_labels
prev_cv = cv_split
return cv_files, cv_labels
|
[
"drewlinsley@gmail.com"
] |
drewlinsley@gmail.com
|
7c67f33358881bf71360ee38962dfd0d831b637a
|
cc6a674cab1dc959189b9edff975625f4815bc1c
|
/ResNet/model.py
|
75cb8048b0ad5a819a1f03a4a51e5f67be9b1524
|
[
"MIT"
] |
permissive
|
shreyansh26/DL-Code-Repository
|
15173042f566ea42f96eb65283347927a2fab4ff
|
f1974eedc1fef54b2d274703390a22721e46f502
|
refs/heads/master
| 2023-07-15T23:15:05.484609
| 2021-08-30T15:41:20
| 2021-08-30T15:41:20
| 382,834,342
| 0
| 0
| null | 2021-07-04T12:11:08
| 2021-07-04T11:27:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,018
|
py
|
from typing import List, Optional
import torch
import torch.nn as nn
class ShortcutProjection(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x: torch.Tensor):
return self.bn(self.conv(x))
class ResidualBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int):
super().__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
self.bn1 = nn.BatchNorm2d(out_channels)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2d(out_channels)
if stride != 1 or in_channels != out_channels:
self.shortcut = ShortcutProjection(in_channels, out_channels, stride)
else:
self.shortcut = nn.Identity()
self.act2 = nn.ReLU()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.act1(self.bn1(self.conv1(x)))
x = self.bn2(self.conv2(x))
return self.act2(x + shortcut)
class BottleneckResidualBlock(nn.Module):
def __init__(self, in_channels: int, bottleneck_channels: int, out_channels: int, stride: int):
super().__init__()
self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, kernel_size=1, stride=1)
self.bn1 = nn.BatchNorm2d(bottleneck_channels)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride, padding=1)
self.bn2 = nn.BatchNorm2d(bottleneck_channels)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, kernel_size=1, stride=1)
self.bn3 = nn.BatchNorm2d(out_channels)
if stride != 1 or in_channels != out_channels:
self.shortcut = ShortcutProjection(in_channels, out_channels, stride)
else:
self.shortcut = nn.Identity()
self.act3 = nn.ReLU()
def forward(self, x: torch.Tensor):
shortcut = self.shortcut(x)
x = self.act1(self.bn1(self.conv1(x)))
x = self.act2(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
return self.act3(x + shortcut)
class ResNetBase(nn.Module):
def __init__(self, n_blocks: List[int], n_channels: List[int], bottlenecks: Optional[List[int]] = None, img_channels: int = 3, first_kernel_size: int = 7):
super().__init__()
assert len(n_blocks) == len(n_channels)
assert bottlenecks is None or len(bottlenecks) == len(n_channels)
self.conv = nn.Conv2d(img_channels, n_channels[0], kernel_size=first_kernel_size, stride=2, padding=first_kernel_size // 2)
self.bn = nn.BatchNorm2d(n_channels[0])
blocks = []
prev_channel = n_channels[0]
for i, channels in enumerate(n_channels):
if len(blocks) == 0:
stride = 2
else:
stride = 1
if bottlenecks is None:
blocks.append(ResidualBlock(prev_channel, channels, stride=stride))
else:
blocks.append(BottleneckResidualBlock(prev_channel, bottlenecks[i], channels, stride=stride))
prev_channel = channels
for _ in range(n_blocks[i] - 1):
if bottlenecks is None:
blocks.append(ResidualBlock(channels, channels, stride=1))
else:
blocks.append(BottleneckResidualBlock(channels, bottlenecks[i], channels, stride=1))
self.blocks = nn.Sequential(*blocks)
def forward(self, x: torch.Tensor):
x = self.bn(self.conv(x))
x = self.blocks(x)
x = x.view(x.shape[0], x.shape[1], -1)
return x.mean(dim=-1)
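# Hedged usage sketch (not part of the original file; all values below are
# illustrative assumptions): build a ResNet-18-style backbone from ResNetBase
# and check the pooled feature shape with a dummy batch.
if __name__ == "__main__":
    model = ResNetBase(n_blocks=[2, 2, 2, 2], n_channels=[64, 128, 256, 512])
    dummy = torch.randn(4, 3, 224, 224)  # batch of 4 RGB images
    features = model(dummy)              # globally average-pooled features
    print(features.shape)                # expected: torch.Size([4, 512])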
|
[
"shreyansh.pettswood@gmail.com"
] |
shreyansh.pettswood@gmail.com
|
6ba198aa9789fafafad70ec42d47555fbc892bfd
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2499/60764/260203.py
|
a884a40143476a0e998c5bfbe25a62acd3a735eb
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
n=int(input())
stack=[]
for i in range(n):
command=input().split()
if command[0]=='Add':
stack.append(command)
elif command[0]=='Del':
ind=int(command[1])
if ind-1>=0 and ind-1<len(stack):
stack[ind-1][0]='No'
else:
x=int(command[1])
res=0
for j in range(len(stack)):
if stack[j][0]=="Add":
tem=int(stack[j][1])*x+int(stack[j][2])
if tem>int(stack[j][3]):
res+=1
print(res)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
18ad0fb390f8a5da0684d613ebbd0b40b06b19e0
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/coder_20200618172244.py
|
e1df5edcdbebf0e88ece5d9f49dfb3cc0efc3a55
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
def MinWindowSubstring(strArr):
    # Return the smallest window of strArr[0] that contains every
    # character of strArr[1], counting multiplicity.
    containing_string = strArr[0]  # first string: the one searched in
    search_string = strArr[1]      # second string: characters to cover

    def covers(window):
        # True when the window holds each required character often enough
        return all(window.count(c) >= search_string.count(c)
                   for c in set(search_string))

    best = containing_string
    for start in range(len(containing_string)):
        # grow a window from this start until it covers search_string
        for end in range(start + len(search_string), len(containing_string) + 1):
            window = containing_string[start:end]
            if covers(window):
                if len(window) < len(best):
                    best = window
                break  # any longer window from this start cannot be shorter
    return best

print(MinWindowSubstring(["sz", "azjskfzts"]))  # -> "zjs"
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
12c9baca46273c4388c17257f7bd4400874025fc
|
485be21ebe0a956b7f4a681968e160a463903ecc
|
/LibSVMRelate/SVMRunSinglePara.py
|
4902b0dc26c4909d9231de75050cbe1097987df6
|
[] |
no_license
|
xiongchenyan/cxPyLib
|
e49da79345006d75a4261a8bbd4cc9a7f730fad2
|
8d87f5a872458d56276a2a2b0533170ede4d5851
|
refs/heads/master
| 2021-01-10T20:43:20.147286
| 2016-01-14T04:02:45
| 2016-01-14T04:02:45
| 17,610,431
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
'''
Created on Mar 25, 2014
input: train, dev|test, para
output: accuracy, label of required
@author: cx
'''
import site
site.addsitedir('/bos/usr0/cx/PyCode/Geektools')
site.addsitedir('/bos/usr0/cx/PyCode/QueryExpansion')
site.addsitedir('/bos/usr0/cx/PyCode/cxPylib')
site.addsitedir('/bos/usr0/cx/LibSVM/libsvm/python/')
from LibSVMRelate.SVMBase import *
from svmutil import *
from cxBase.base import *
from cxBase.ContingencyTable import *
import json
class SVMRunSingleParaC(object):
def LoadData(self,InName):
print "to be implemented by my inherited class"
return [[],[]]
def DumpPrediction(self,OutName,TestInName ,p_label,p_val):
print "to be implemented by my inherited class"
return False
def Process(self,TrainInName,TestInName,ParaInName,OutName):
lY,lX = self.LoadData(TrainInName)
lSVMPara = ReadSVMParaSet(ParaInName)
SVMPara = lSVMPara[0] #only use first one
SVMModel = svm_train(lY,lX,SVMPara.dump())
lTestY,lTestX = self.LoadData(TestInName)
p_label,p_acc,p_val = svm_predict(lTestY,lTestX,SVMModel,'-b 1')
        # add a contingency matrix output?
lCTable = ContingencyTable(p_label,lTestY)
print json.dumps(lCTable)
out = open(OutName,'w')
json.dump(lCTable,out)
out.close()
self.DumpPrediction(OutName + "_pre", TestInName, p_label, p_val)
return True
|
[
"xiongchenyan@gmail.com"
] |
xiongchenyan@gmail.com
|
fc3532d68cd78aace0e9f911ff245742aa89aed3
|
991eef78c307ebfd5d149d5908270e1e528e75c2
|
/models/cells/modelPC2006Akemann.py
|
b4e850851b05e33ace54ccde2264c8a4c21316f1
|
[
"BSD-3-Clause"
] |
permissive
|
HarshKhilawala/cerebmodels
|
bd1a2310253de5536a77be08dfdb33e29d6e636d
|
d2a2f2ef947ef9dc23ddce6e55159240cd3233cb
|
refs/heads/master
| 2023-06-11T09:36:10.969240
| 2021-06-29T17:21:09
| 2021-06-29T17:21:09
| 369,399,200
| 0
| 0
|
BSD-3-Clause
| 2021-05-21T03:08:41
| 2021-05-21T03:08:40
| null |
UTF-8
|
Python
| false
| false
| 7,378
|
py
|
# ~/models/cells/modelPC2006Akemann.py
import os
pwd = os.getcwd() # record root directory path ~/cerebmodels
path_to_files = pwd + os.sep + "models" + os.sep + "cells" + os.sep + \
"PC2006Akemann" + os.sep # record path to this model/folder
from models.cells.PC2006Akemann.Purkinje import Purkinje
from executive import ExecutiveControl
from managers.simulation import SimulationManager as sm
from managers.read import ReadManager as rm
from managers.signalprocessing import SignalProcessingManager as spm
import sciunit
from cerebunit.capabilities.cells.response import ProducesElectricalResponse
from cerebunit.capabilities.cells.measurements import ProducesSomaRestingVm, ProducesSomaSpikeHeight
#from pdb import set_trace as breakpoint
class PurkinjeCell( sciunit.Model,
ProducesElectricalResponse,
ProducesSomaRestingVm ):
"""USE CASE:
"""
# AFTER the model is in the HBP Validation Framework Model catalog, set the generated uuid
#uuid = "22dc8fd3-c62b-4e07-9e47-f5829e038d6d"
def __init__(self):
### ===================== Descriptive Attributes ======================
self.modelscale = "cells"
self.modelname = "PC2006Akemann"
# ------specify cell-regions from with response are recorded-------
self.regions = {"soma": ["v"]} #"dend_sm": ["v"], "dend_sp": ["v"]}
self.recordingunits = {"v": "mV"}
# -----------attributed inheritance from sciunit.Model--------------
self.name = "Akemann and Knöpfel 2006 model of PurkinjeCell"
        self.description = "Akemann & Knöpfel 2006 model of PurkinjeCell (PC), published in 10.1523/JNEUROSCI.5204-05.2006. This is a single compartment model. This model is the SciUnit wrapped version of the NEURON model in ModelDB accession # 80769."
#
### =================== Instantiate cell template ====================
sm.lock_and_load_model_libraries(modelscale=self.modelscale,
modelname=self.modelname)
os.chdir(path_to_files)
self.cell = Purkinje()
os.chdir(pwd)
### ===============================================================
self.fullfilename = "nil"
self.prediction = "nil"
#
# =======================================================================
# +++++++++++++++++++++++ MODEL CAPABILITIES ++++++++++++++++++++++++++++
# =======================================================================
# --------------------- produce_voltage_response ------------------------
def produce_voltage_response(self, **kwargs):
"""generic/essential model response
**Keyword Arguments:**
kwargs = { "parameters": dictionary with keys,
"stimparameters": None or dictionary with keys "type" and "stimlist",
"onmodel": instantiated model }
"""
#ExecutiveControl.launch_model_raw("cells")
print("Simulation produce_voltage_response starting ...")
ec = ExecutiveControl() # only works when in ~/cerebmodels
model = ec.launch_model( parameters = kwargs["parameters"],
stimparameters = kwargs["stimparameters"],
stimloc = kwargs["stimloc"],
onmodel = kwargs["onmodel"], mode = "raw" )
print("File saving ...")
fullfilename = ec.save_response()
setattr(model, "fullfilename", fullfilename)
print("File saved.")
print("Simulation produce_voltage_response Done.")
return model
# ----------------------- produce_soma_restingVm -----------------------------
def produce_soma_restingVm(self, **kwargs):
"""
kwargs = { "parameters": dictionary with keys,
"stimparameters": dictionary with keys "type" and "stimlist",
"onmodel": instantiated model }
"""
print("Sim produce_soma v_restingVm starting ...")
ec = ExecutiveControl() # only works when in ~/cerebmodels
model = ec.launch_model( parameters = kwargs["parameters"],
stimparameters = kwargs["stimparameters"],
stimloc = kwargs["stimloc"], onmodel = kwargs["onmodel"],
capabilities = {"model": "produce_voltage_response",
"vtest": ProducesElectricalResponse},
mode="capability")
nwbfile = rm.load_nwbfile(model.fullfilename)
orderedepochs = rm.order_all_epochs_for_region(nwbfile=nwbfile, region="soma v")
timestamps_over_epochs = [ rm.timestamps_for_epoch( orderedepochs[i] )
for i in range(len(orderedepochs)) ]
data_over_epochs = [ rm.data_for_epoch( orderedepochs[i] )
for i in range(len(orderedepochs)) ]
baseVms = spm.distill_baseVm_pre_epoch( timestamps = timestamps_over_epochs,
datavalues = data_over_epochs )
setattr(model, "prediction", baseVms)
print("Simulation produce_soma v_restingVm Done.")
return model
# ----------------------- produce_soma_spikeheight ------------------------
def produce_soma_spikeheight(self, **kwargs):
"""
kwargs = { "parameters": dictionary with keys,
"stimparameters": dictionary with keys "type" and "stimlist",
"onmodel": instantiated model }
"""
print("Sim produce_soma_spikeheight starting ...")
ec = ExecutiveControl() # only works when in ~/cerebmodels
model = ec.launch_model( parameters = kwargs["parameters"],
stimparameters = kwargs["stimparameters"],
stimloc = kwargs["stimloc"], onmodel = kwargs["onmodel"],
capabilities = {"model": "produce_voltage_response",
"vtest": ProducesElectricalResponse},
mode="capability" )
nwbfile = rm.load_nwbfile(model.fullfilename)
orderedepochs = rm.order_all_epochs_for_region(nwbfile=nwbfile, region="soma v")
timestamps_over_epochs = [ rm.timestamps_for_epoch( orderedepochs[i] )
for i in range(len(orderedepochs)) ]
data_over_epochs = [ rm.data_for_epoch( orderedepochs[i] )
for i in range(len(orderedepochs)) ]
baseVm = spm.distill_baseVm_pre_epoch( timestamps = timestamps_over_epochs,
datavalues = data_over_epochs )
try:
peakVms = spm.distill_peakVm_from_spikes( timestamps = timestamps_over_epochs,
datavalues = data_over_epochs )
except:
peakVms = baseVm
setattr(model, "prediction", peakVms[0] - baseVm[0])
print("Simulation produce_soma_spikeheight Done.")
return model
# ----------------------- produce_spike_train ---------------------------
def produce_spike_train(self, **kwargs):
"""
Use case:
"""
pass
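# Hedged usage sketch (everything below is a hypothetical illustration, not
# part of the original file; the kwargs keys follow the docstrings above, but
# the parameter values are invented and require the cerebmodels environment):
#
#   model = PurkinjeCell()
#   model = model.produce_voltage_response(
#       parameters={"dt": 0.025, "celsius": 37, "tstop": 500, "v_init": -65},
#       stimparameters={"type": ["current", "IClamp"],
#                       "stimlist": [{"amp": 0.5, "dur": 100, "delay": 100}]},
#       stimloc="soma", onmodel=model)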
|
[
"neuralgraphs@gmail.com"
] |
neuralgraphs@gmail.com
|
b997f16691dc838e057231b7245b34283772c091
|
e7e34e2726790686a1f239e22487fe7c957e179f
|
/homeassistant/components/nam/sensor.py
|
c5c9c9f2e77ca2bc5e91242996efcb8adf307036
|
[
"Apache-2.0"
] |
permissive
|
AlexxIT/home-assistant
|
68a17b49644c5d943b204dc75e1f11fe3b701161
|
8de7966104911bca6f855a1755a6d71a07afb9de
|
refs/heads/dev
| 2022-03-22T14:37:18.774214
| 2021-10-09T16:10:43
| 2021-10-09T16:10:43
| 100,278,871
| 9
| 0
|
Apache-2.0
| 2022-01-31T06:18:02
| 2017-08-14T14:50:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,817
|
py
|
"""Support for the Nettigo Air Monitor service."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import cast
from homeassistant.components.sensor import (
DOMAIN as PLATFORM,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util.dt import utcnow
from . import NAMDataUpdateCoordinator
from .const import ATTR_UPTIME, DOMAIN, MIGRATION_SENSORS, SENSORS
PARALLEL_UPDATES = 1
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Add a Nettigo Air Monitor entities from a config_entry."""
coordinator: NAMDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
# Due to the change of the attribute name of two sensors, it is necessary to migrate
# the unique_ids to the new names.
ent_reg = entity_registry.async_get(hass)
for old_sensor, new_sensor in MIGRATION_SENSORS:
old_unique_id = f"{coordinator.unique_id}-{old_sensor}"
new_unique_id = f"{coordinator.unique_id}-{new_sensor}"
if entity_id := ent_reg.async_get_entity_id(PLATFORM, DOMAIN, old_unique_id):
_LOGGER.debug(
"Migrating entity %s from old unique ID '%s' to new unique ID '%s'",
entity_id,
old_unique_id,
new_unique_id,
)
ent_reg.async_update_entity(entity_id, new_unique_id=new_unique_id)
sensors: list[NAMSensor | NAMSensorUptime] = []
for description in SENSORS:
if getattr(coordinator.data, description.key) is not None:
if description.key == ATTR_UPTIME:
sensors.append(NAMSensorUptime(coordinator, description))
else:
sensors.append(NAMSensor(coordinator, description))
async_add_entities(sensors, False)
class NAMSensor(CoordinatorEntity, SensorEntity):
"""Define an Nettigo Air Monitor sensor."""
coordinator: NAMDataUpdateCoordinator
def __init__(
self,
coordinator: NAMDataUpdateCoordinator,
description: SensorEntityDescription,
) -> None:
"""Initialize."""
super().__init__(coordinator)
self._attr_device_info = coordinator.device_info
self._attr_unique_id = f"{coordinator.unique_id}-{description.key}"
self.entity_description = description
@property
def native_value(self) -> StateType:
"""Return the state."""
return cast(
StateType, getattr(self.coordinator.data, self.entity_description.key)
)
@property
def available(self) -> bool:
"""Return if entity is available."""
available = super().available
# For a short time after booting, the device does not return values for all
# sensors. For this reason, we mark entities for which data is missing as
# unavailable.
return (
available
and getattr(self.coordinator.data, self.entity_description.key) is not None
)
class NAMSensorUptime(NAMSensor):
"""Define an Nettigo Air Monitor uptime sensor."""
@property
def native_value(self) -> str:
"""Return the state."""
uptime_sec = getattr(self.coordinator.data, self.entity_description.key)
return (
(utcnow() - timedelta(seconds=uptime_sec))
.replace(microsecond=0)
.isoformat()
)
|
[
"noreply@github.com"
] |
AlexxIT.noreply@github.com
|
8be966152d0f159f1526e3cbea1aa111e05af8a3
|
d3b3a3e8c3a053c74479d588a26dd0343e015663
|
/练习代码/Flask/6.7/重定向.py
|
50f2523f83def5f96a516f4a5c4c6f8cfa8cd41e
|
[] |
no_license
|
Mr-big-c/github
|
b6978ad920bc8f4d2cee1fca1cac72cce5767e12
|
d301aa4516a00007980f34b94de3c7b5fb4198fa
|
refs/heads/master
| 2021-10-01T18:57:51.133095
| 2018-11-28T15:00:40
| 2018-11-28T15:00:40
| 300,145,501
| 1
| 0
| null | 2020-10-01T04:42:30
| 2020-10-01T04:42:29
| null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
# -*- coding: utf-8 -*-
# @File : 重定向.py
# @Author: 一稚杨
# @Date : 2018/6/7/007
# @Desc : defines the redirect and the 404 page
# redirect() implements the redirection
from flask import Flask, redirect, render_template, flash
app = Flask(__name__)
app.secret_key = '123456'
@app.route("/index1")
def index1():
flash("登录成功", category="login")
flash("hello",category="hello")
return redirect("/index2/")
@app.route("/index2/")
def index2():
return render_template("flash.html")
@app.errorhandler(404)
def error(error):
return render_template("404.html"),404
# Which page is requested when a form's action is empty? Conclusion: when action is empty, the data is submitted back to the page that served the form
@app.route("/action_none", methods=["GET", "POST"])
def action_none():
return render_template("action.html")
app.run(debug=True)
|
[
"2551628690@qq.com"
] |
2551628690@qq.com
|
4472c64391e5233857750bdc27edfff4c207f694
|
c189f58db5e339df742a78b9be0a3b768d5bb3e8
|
/Python_学习手册/Exercise/timer.py
|
6be6efc4c2ebd1be252362535947d39bc6e51233
|
[] |
no_license
|
xuyagang/pycon
|
19ac0de6ff2ef0569dbc81673aed51aad1ccd8bc
|
b6be3293f0ffc7d399fd039f6b274fff8f71584c
|
refs/heads/master
| 2021-12-15T05:03:42.992320
| 2021-11-03T23:59:19
| 2021-11-03T23:59:19
| 153,795,002
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
import time
reps = 1000
repslist = range(reps)
def timer(func,*pargs, **kargs):
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
for i in repslist:
ret = func(*pargs,**kargs)
    elapsed = time.perf_counter() - start
return (elapsed, ret)
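# A minimal usage sketch (not in the original file): time 1000 repeated calls
# of the built-in pow; the arguments here are purely illustrative.
if __name__ == "__main__":
    elapsed, result = timer(pow, 2, 1000)
    print("elapsed: %.6f sec, result has %d digits" % (elapsed, len(str(result))))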
|
[
"aaadam@mail.com"
] |
aaadam@mail.com
|
6c44b31939b27278f1be85c015a64c31f9a07491
|
5f8534eadc182d0c79c4089bd803bb63b1178f5d
|
/src/textbook/chapter5/chapter5-1/chapter5-1-2.py
|
6f6d9fd758ac57e658af109068ea7a4c0991a052
|
[] |
no_license
|
mryyomutga/TechnicalSeminar
|
b0f0a3c2fbff469e22896dc782586febdd604919
|
3bd1c3a9c1c3e2331586dbaab0ce745c2f7e86bd
|
refs/heads/master
| 2021-08-28T02:30:02.062637
| 2017-12-11T03:29:15
| 2017-12-11T03:29:15
| 106,644,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
# -*- coding: utf-8 -*-
# Creating a desktop application, part 2
# File selection dialog
import tkinter.filedialog as fd
path = fd.askopenfilename(
title="ファイルを選択してください",
filetypes=[("python", "py")]
)
print(path)
|
[
"mryyomutga@gmail.com"
] |
mryyomutga@gmail.com
|
a8881eead08e69937d42ebfbb4af65004a20ed91
|
1c6283303ceb883add8de4ee07c5ffcfc2e93fab
|
/Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/impairment/profile/delay/delay.py
|
1bb184d1931df595e004a78377ad7156ad6ab4ba
|
[] |
no_license
|
pdobrinskiy/devcore
|
0f5b3dfc2f3bf1e44abd716f008a01c443e14f18
|
580c7df6f5db8c118990cf01bc2b986285b9718b
|
refs/heads/main
| 2023-07-29T20:28:49.035475
| 2021-09-14T10:02:16
| 2021-09-14T10:02:16
| 405,919,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,817
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Delay(Base):
"""Delay each packet.
The Delay class encapsulates a required delay resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'delay'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'Units': 'units',
'Value': 'value',
}
_SDM_ENUM_MAP = {
'units': ['kilometers', 'kKilometers', 'kMicroseconds', 'kMilliseconds', 'kSeconds', 'microseconds', 'milliseconds', 'seconds'],
}
def __init__(self, parent, list_op=False):
super(Delay, self).__init__(parent, list_op)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, delay packets.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def Units(self):
# type: () -> str
"""
Returns
-------
- str(kilometers | kKilometers | kMicroseconds | kMilliseconds | kSeconds | microseconds | milliseconds | seconds): Specify the units for the delay value.
"""
return self._get_attribute(self._SDM_ATT_MAP['Units'])
@Units.setter
def Units(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Units'], value)
@property
def Value(self):
# type: () -> int
"""
Returns
-------
- number: Time to delay each packet.
"""
return self._get_attribute(self._SDM_ATT_MAP['Value'])
@Value.setter
def Value(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Value'], value)
def update(self, Enabled=None, Units=None, Value=None):
# type: (bool, str, int) -> Delay
"""Updates delay resource on the server.
Args
----
- Enabled (bool): If true, delay packets.
- Units (str(kilometers | kKilometers | kMicroseconds | kMilliseconds | kSeconds | microseconds | milliseconds | seconds)): Specify the units for the delay value.
- Value (number): Time to delay each packet.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
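# Hedged usage sketch (hypothetical session/profile objects, not defined in
# this file; requires a live IxNetwork server):
#
#   delay = session.Ixnetwork.Impairment.Profile.find()[0].Delay
#   delay.update(Enabled=True, Units="milliseconds", Value=5)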
|
[
"pdobrinskiy@yahoo.com"
] |
pdobrinskiy@yahoo.com
|
ccaee085a1c2b7c3aa12fe980fb250035df6e7a2
|
63fe333ce975837a3cb9c061ea910dc6923ec724
|
/training/c17_numpy/e09-broadcasting.py
|
9f9da96038f4bda2d49208ab685d482e3e40508a
|
[] |
no_license
|
PablitoMoribe/pythontraining
|
1976a5297b8316f1295a0d5d85f5bd3c99a096e3
|
49e146a28080c1b4fc7c3a7b37ce8c4593a139ff
|
refs/heads/master
| 2020-07-07T16:56:44.136633
| 2019-08-19T21:47:34
| 2019-08-19T21:47:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
import numpy as np
# pip install numpy
arreglo1 = np.arange(1, 10, dtype=np.int32).reshape((3, 3))
print('Numpy Array 1:\n{}'.format(arreglo1))
arreglo2 = np.arange(3, 0, -1, dtype=np.int32).reshape((1, 3))
print('Numpy Array 2:\n{}'.format(arreglo2))
arreglo3 = np.arange(10, 40, 10, dtype=np.int32).reshape((3, 1))
print('Numpy Array 3:\n{}'.format(arreglo3))
print()
# Binary Operations
print('Numpy Addition (a1 + a2)')
print(arreglo1 + arreglo2, end='\n\n')
print('Numpy Addition (a1 + a3)')
print(arreglo1 + arreglo3, end='\n\n')
print('Numpy Addition (a1 + 5)')
print(arreglo1 + 5, end='\n\n')
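# A short follow-up sketch: np.broadcast reports the common shape the operands
# are stretched to before the element-wise additions above take place.
print('Broadcast (3,3)+(1,3) ->', np.broadcast(arreglo1, arreglo2).shape)  # (3, 3)
print('Broadcast (3,3)+(3,1) ->', np.broadcast(arreglo1, arreglo3).shape)  # (3, 3)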
|
[
"user.nuage@gmail.com"
] |
user.nuage@gmail.com
|
5213395f6c53b2b1a1104035cc73eb2592848add
|
250003a344e5cb974579d67daeeed7ea6b51f620
|
/day18/01_Demo_Logistic_Regression.py
|
02a1d8461a66806f24b2a0a7da863c86c35cf46c
|
[] |
no_license
|
lakshsharma07/training2019
|
540cace993859f06b4f402fc5a7849b26fb11ce8
|
d8ae0b0e1e567525e806834379713003a676ec06
|
refs/heads/master
| 2020-05-24T07:11:25.613107
| 2019-06-25T03:27:58
| 2019-06-25T03:27:58
| 187,154,851
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
# -*- coding: utf-8 -*-
import sklearn as sk
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
"""
We will look at data regarding coronary heart disease (CHD) in South Africa. The goal is to predict CHD from variables such as tobacco usage, family history, LDL cholesterol levels, alcohol usage, and obesity.
"""
heart = pd.read_csv('Heart_Disease.csv', sep=',', header=0)
heart.head()
labels = heart.iloc[:,9].values
features = heart.iloc[:,:9].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
features_train = sc.fit_transform(features_train)
features_test = sc.transform(features_test)
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(features_train, labels_train)
#Calculate Class Probabilities
probability = classifier.predict_proba(features_test)
# Predicting the class labels
labels_pred = classifier.predict(features_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(labels_test, labels_pred)
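# A short follow-up sketch (not in the original script): overall accuracy can
# be read straight off the 2x2 confusion matrix computed above.
accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()
print('Test accuracy: {:.3f}'.format(accuracy))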
|
[
"sharma1997lak@gmail.com"
] |
sharma1997lak@gmail.com
|
786f5b0e25618a69920c89fa5a7cc933ca584bdc
|
cad91ae76d2746a6c28ddda0f33a58f9d461378f
|
/PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/warmup.py
|
f5e0dc0589a40004ab8c2e93652e1cc11b26cce6
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/DeepLearningExamples
|
fe677521e7e2a16e3cb0b77e358f9aab72f8c11a
|
a5388a45f71a949639b35cc5b990bd130d2d8164
|
refs/heads/master
| 2023-08-31T20:57:08.798455
| 2023-08-23T10:09:12
| 2023-08-23T10:09:12
| 131,881,622
| 11,838
| 3,124
| null | 2023-08-28T16:57:33
| 2018-05-02T17:04:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,760
|
py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from distutils.version import LooseVersion
from importlib.metadata import version
from typing import List
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode
from .perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig
from .utils import parse_server_url
LOGGER = logging.getLogger("warmup")
def performance_evaluation_warmup(
server_url: str,
model_name: str,
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
input_data: str,
input_shapes: List[str],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
batching_mode: BatchingMode,
offline_mode: OfflineMode,
evaluation_mode: EvaluationMode,
output_shared_memory_size: int,
):
protocol, host, port = parse_server_url(server_url)
measurement_interval = 2 * measurement_interval
measurement_request_count = 2 * measurement_request_count
if batching_mode == BatchingMode.STATIC:
batch_sizes = sorted({1, batch_sizes[-1]})
max_concurrency = 1
min_concurrency = 1
step = 1
elif batching_mode == BatchingMode.DYNAMIC:
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances
max_concurrency = min(256, max_total_requests)
step = max(1, max_concurrency // 2)
min_concurrency = step
batch_sizes = [max(1, max_total_requests // 256)]
else:
raise ValueError(f"Unsupported batching mode: {batching_mode}")
for batch_size in batch_sizes:
for concurrency in range(min_concurrency, max_concurrency + step, step):
params = {
"model-name": model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{host}:{port}",
"protocol": protocol,
"input-data": input_data,
"measurement-interval": measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"output-shared-memory-size": output_shared_memory_size,
}
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = measurement_mode.value
params["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = offline_mode.value
params["output-shared-memory-size"] = output_shared_memory_size
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config)
perf_analyzer.run()
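# Hedged worked example (illustrative numbers only) of the dynamic-batching
# sweep above: with batch_sizes=[1, 2, 4, 8], one Triton instance and two
# model instances, max_total_requests = 2 * 8 * 1 * 2 = 32, so
# max_concurrency = min(256, 32) = 32, step = max(1, 32 // 2) = 16, and the
# warmup sweeps concurrency 16 and 32 at batch size max(1, 32 // 256) = 1.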
|
[
"kkudrynski@nvidia.com"
] |
kkudrynski@nvidia.com
|
9030a9aeac0cd19b1e22c38323c28b02eeb34bd6
|
2ce2f39b8997e23ce3e22acbed911bd09b367dff
|
/tests/test_http_response.py
|
052f343814c78b48cc8b6393bb57974cc713763a
|
[
"Apache-2.0"
] |
permissive
|
gourneau/Growler
|
315270b0d7ae0a81e98efcb028da33f8489529d4
|
a057adbed0dbde6e6a15feee59add0f2889b546a
|
refs/heads/dev
| 2021-01-24T15:34:55.851883
| 2016-05-04T17:16:15
| 2016-05-04T17:16:15
| 58,094,178
| 1
| 0
| null | 2016-05-05T01:07:17
| 2016-05-05T01:07:17
| null |
UTF-8
|
Python
| false
| false
| 3,798
|
py
|
#
# tests/test_http_response.py
#
import growler
import asyncio
import pytest
from unittest import mock
from collections import OrderedDict
from growler.http.response import Headers
from mock_classes import (
request_uri,
)
@pytest.fixture
def res(mock_protocol):
return growler.http.HTTPResponse(mock_protocol)
@pytest.fixture
def mock_app():
return mock.Mock(spec=growler.App,
)
@pytest.fixture
def mock_protocol(mock_app, request_uri):
from urllib.parse import (unquote, urlparse, parse_qs)
parsed_url = urlparse(request_uri)
protocol = mock.Mock(spec=growler.http.GrowlerHTTPProtocol,
loop=mock.Mock(spec=asyncio.BaseEventLoop),
http_application=mock_app,
headers=None,
path=unquote(parsed_url.path),
query=parse_qs(parsed_url.query),)
protocol.socket.getpeername.return_value = ['', '']
return protocol
def test_constructor(res, mock_protocol):
assert isinstance(res, growler.http.HTTPResponse)
assert res.protocol is mock_protocol
def test_construct_with_eol(mock_protocol):
EOL = ':'
res = growler.http.HTTPResponse(mock_protocol, EOL)
assert isinstance(res, growler.http.HTTPResponse)
assert res.protocol is mock_protocol
assert res.EOL is EOL
def test_default_headers(res):
res._set_default_headers()
# assert res.protocol is mock_protocol
def test_send_headers(res):
res.send_headers()
def test_write(res):
res.write()
def test_write_eof(res):
res.write_eof()
def test_end(res):
res.end()
@pytest.mark.parametrize('url, status', [
('/', 200),
])
def test_redirect(res, url, status):
res.redirect(url, status)
@pytest.mark.parametrize('obj, expect', [
({'a': 'b'}, b'{"a": "b"}')
])
def test_json(res, mock_protocol, obj, expect):
res.json(obj)
assert res.headers['content-type'] == 'application/json'
    mock_protocol.transport.write.assert_called_with(expect)
@pytest.mark.parametrize('obj, expect', [
({'a': 'b'}, b'{"a": "b"}')
])
def test_headers(res, mock_protocol, obj, expect):
res.json(obj)
assert res.headers['content-type'] == 'application/json'
mock_protocol.transport.write.assert_called_with(expect)
def test_header_construct_with_dict():
headers = Headers({'a': 'b', 'c': 'D'})
s = str(headers)
assert s == 'a: b\r\nc: D\r\n\r\n' or s == 'c: D\r\na: b\r\n\r\n'
def test_header_construct_with_keywords():
headers = Headers(a='b', c='D')
s = str(headers)
assert s == 'a: b\r\nc: D\r\n\r\n' or s == 'c: D\r\na: b\r\n\r\n'
def test_header_construct_mixed():
headers = Headers({'a': 'b'}, c='D')
s = str(headers)
assert s == 'a: b\r\nc: D\r\n\r\n' or s == 'c: D\r\na: b\r\n\r\n'
def test_header_set():
headers = Headers()
headers['foo'] = 'bar'
assert str(headers) == 'foo: bar\r\n\r\n'
def test_header_update_with_dict():
headers = Headers()
d = {'foo': 'bar'}
headers.update(d)
assert str(headers) == 'foo: bar\r\n\r\n'
def test_header_update_with_multiple_dicts():
headers = Headers()
d_0 = OrderedDict([('foo', 'baz'), ('a', 'b')])
d_1 = {'foo': 'bar'}
headers.update(d_0, d_1)
assert str(headers) == 'foo: bar\r\na: b\r\n\r\n'
def test_header_update_with_keyword():
headers = Headers()
headers.update(foo='bar')
assert str(headers) == 'foo: bar\r\n\r\n'
def test_header_update_with_mixed():
headers = Headers()
d = {'foo': 'bazz'}
headers.update(d, foo='bar')
assert str(headers) == 'foo: bar\r\n\r\n'
def test_callable_header_value():
headers = Headers()
headers['foo'] = lambda: 'bar'
assert str(headers) == 'foo: bar\r\n\r\n'
|
[
"andrewkubera@gmail.com"
] |
andrewkubera@gmail.com
|
4d3aacc2cd74a7a3f40fd55c24ee756ac2daa48c
|
fe6fe31cda7c367ba480faeadb119e5074d7c8a4
|
/src/uproot/dynamic.py
|
7ed0b06dd64af7d48e2ea49570b8af30c329e21f
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-hep/uproot5
|
a03ec9f8f20d8b5f45957ccf280e1cd75bfab89a
|
cb70ce0715276bbc403cb48a511c8a3b27cffe7f
|
refs/heads/main
| 2023-09-02T10:09:29.669547
| 2023-08-24T13:30:06
| 2023-08-24T13:30:06
| 262,422,450
| 51
| 20
|
BSD-3-Clause
| 2023-09-14T17:04:36
| 2020-05-08T20:30:09
|
Python
|
UTF-8
|
Python
| false
| false
| 861
|
py
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot5/blob/main/LICENSE
"""
This module is initially empty, a repository for dynamically adding new classes.
The purpose of this namespace is to allow :doc:`uproot.model.VersionedModel`
classes that were automatically generated from ROOT ``TStreamerInfo`` to be
pickled, with the help of :doc:`uproot.model.DynamicModel`.
In `Python 3.7 and later <https://www.python.org/dev/peps/pep-0562>`__, attempts
to extract items from this namespace generate new :doc:`uproot.model.DynamicModel`
classes, which are used as a container in which data from pickled
:doc:`uproot.model.VersionedModel` instances are filled.
"""
def __getattr__(name):
import uproot
g = globals()
if name not in g:
g[name] = uproot._util.new_class(name, (uproot.model.DynamicModel,), {})
return g[name]
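# Hedged usage sketch (the attribute name below is hypothetical): any
# attribute access on this module manufactures a DynamicModel subclass via
# the module-level __getattr__ above.
#
#   import uproot.dynamic
#   cls = uproot.dynamic.Model_TMyClass_v1  # created on first access
#   assert issubclass(cls, uproot.model.DynamicModel)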
|
[
"noreply@github.com"
] |
scikit-hep.noreply@github.com
|
21b22b5af215a87059bbfe2e4d13bdfb04694bcf
|
9d278285f2bc899ac93ec887b1c31880ed39bf56
|
/ondoc/authentication/migrations/0064_spocdetails_source.py
|
550d0b16e9c44fdf0ef8a5a92e73079dbecdec30
|
[] |
no_license
|
ronit29/docprime
|
945c21f8787387b99e4916cb3ba1618bc2a85034
|
60d4caf6c52a8b70174a1f654bc792d825ba1054
|
refs/heads/master
| 2023-04-01T14:54:10.811765
| 2020-04-07T18:57:34
| 2020-04-07T18:57:34
| 353,953,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# Generated by Django 2.0.5 on 2018-11-02 19:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0063_merge_20181025_1818'),
]
operations = [
migrations.AddField(
model_name='spocdetails',
name='source',
field=models.CharField(blank=True, max_length=2000),
),
]
|
[
"arunchaudhary@policybazaar.com"
] |
arunchaudhary@policybazaar.com
|
7eab1eee572d2490d59d02e103bda65a10f64328
|
4d9a9546a5dc0b550aede272c4ba85af88dbb673
|
/env/lib/python3.8/site-packages/pandas-stubs/core/computation/parsing.pyi
|
3139316a492d6b9c573024b3e5e9b527db637ce6
|
[] |
no_license
|
LuisGonzalezLopez/Luis-Gonzalez
|
f4064dc08ccbada80cc7b45f8fbaaf70f54f420c
|
109f50e2e26a1c4abed5ba502deda9e212955c69
|
refs/heads/master
| 2022-08-04T14:24:42.992548
| 2022-07-23T09:03:08
| 2022-07-23T09:03:08
| 103,600,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
pyi
|
import tokenize
from typing import (
Iterator,
Tuple,
)
BACKTICK_QUOTED_STRING: int
def create_valid_python_identifier(name: str) -> str: ...
def clean_backtick_quoted_toks(tok: Tuple[int, str]) -> Tuple[int, str]: ...
def clean_column_name(name: str) -> str: ...
def tokenize_backtick_quoted_string(
token_generator: Iterator[tokenize.TokenInfo], source: str, string_start: int
) -> Tuple[int, str]: ...
def tokenize_string(source: str) -> Iterator[Tuple[int, str]]: ...
|
[
"luxo237@gmail.com"
] |
luxo237@gmail.com
|
b8ddc127cfcb9245e757ef5500e01dc09c429923
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2513/60795/267936.py
|
d6640b0f57f31e720990a6b563b00de551112cf6
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
n=int(input())
result=[]
for i in range(0,n):
arr=[int(n) for n in input().split(',')]
for j in range(0,n):
result.append(arr[j])
k=int(input())
result.sort()
print(result[k-1])
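# Worked example (input format inferred from the code above, so treat it as
# an assumption): for n=2, rows "3,1" and "4,2", and k=2, the flattened list
# is [3, 1, 4, 2], which sorts to [1, 2, 3, 4], so result[k-1] prints 2.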
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
054eb96e09b87131cdc8f7bea38c96ab8504d39b
|
48d8e96e0eba526fb08c74ee17ca0b89b9ea4192
|
/find_maximum_subarray.py
|
a4607b76dd28b8813ac5e9f9bf58c916cd3afc30
|
[] |
no_license
|
John-W-Stevens/algorithms_clrs
|
12842e246b66f30d4d71c093f7c5919e941eb1f0
|
e926418f069de0ac7e5d6195b810df1be41edf3f
|
refs/heads/master
| 2020-12-21T09:16:35.035058
| 2020-04-22T18:25:42
| 2020-04-22T18:25:42
| 236,383,401
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,660
|
py
|
# Import numpy to use Inf
from numpy import Inf
def find_max_crossing_subarray(arr,low,mid,high):
""" From CLRS pgs. 70-74
"Takes as input an array and indices low,mid,high
Returns a tuple containing the indicies demarcating
a maximum sub-array that crosses the midpoint, along with
the sum of the values in a maximum sub-array."
Running time: linear """
# Look at the left
left_sum = -Inf
sm = 0 # sm for sum
for i in range(mid,low-1,-1):
sm += arr[i]
if sm >= left_sum:
left_sum = sm
max_left = i
# Look at the right
right_sum = -Inf
sm = 0
for j in range(mid+1,high+1):
sm += arr[j]
if sm >= right_sum:
right_sum = sm
max_right = j
return (max_left,max_right,left_sum+right_sum)
def find_maximum_subarray(arr,low,high):
""" From CLSR pgs. 70-74
Input is an array, arr. low and high are index positions in arr that function as bounds for searching.
Returns a tuple containing indicies for maximum sub-array along with the sum of sub-array.
Running time: theta(n lg n) where 'lg n' stands for log2n """
# Base case
if high == low:
return (low,high,arr[low])
else:
# find middle point of array
mid = (low + high)//2
# find a max sub-array in left sub-array
left_low,left_high,left_sum = find_maximum_subarray(arr,low,mid)
# find a max sub-array in right sub-array
right_low,right_high,right_sum = find_maximum_subarray(arr,mid+1,high)
# find a max sub-array that crosses the mid-point
cross_low,cross_high,cross_sum = find_max_crossing_subarray(arr,low,mid,high)
# test if left sub-array contains a sub-array with the maximum sum
if left_sum >= right_sum and left_sum >= cross_sum:
return (left_low,left_high,left_sum)
# test if right sub-array contains a sub-array with the maximum sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return (right_low,right_high,right_sum)
# if neither left nor right sub-arrays contain a sub-array with the maximum sum,
# then a maximum sub-array must cross the mid-point
else:
return (cross_low,cross_high,cross_sum)
test = [17, -25, 6, 18, -23, 8, 28, 6, 34, 31, -50, 3, 46, -33, -45, -26, 14, -23, 45, -24, 21, -31, 19, -41, 49,
47, 29, -11, 16, 12, -9, -14, 26, -46, -11, 39, -41, -13, -11, 8, -19, -13, -9, -25, -15, 27, 30, 8, 10]
print(find_maximum_subarray(test,0,len(test)-1))
# Output: (24, 32, 145) the maximum subarray exists at test[24:33] and the sum of this subarray is 145
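# Optional sanity-check sketch (not in the original file): confirm the sum
# against a brute-force O(n^2) scan over all subarrays of the same test data.
def brute_force_max_subarray(arr):
    best = (0, 0, arr[0])
    for i in range(len(arr)):
        total = 0
        for j in range(i, len(arr)):
            total += arr[j]
            if total > best[2]:
                best = (i, j, total)
    return best

assert brute_force_max_subarray(test)[2] == find_maximum_subarray(test, 0, len(test) - 1)[2]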
|
[
"john.william.stevens1@gmail.com"
] |
john.william.stevens1@gmail.com
|
0f104fa41dfb5aa5dba2362b7d925e046b1292c4
|
e8d7e13eb4d26c0a0147f2d0208d70e61f865c2c
|
/untitled50/bin/python3.6m-config
|
54996d6a3566cc7b19ca4b84dbefad58b93fe58f
|
[] |
no_license
|
fazi4888/APCSP-FreshmanYear
|
55c5b5717aadeb2d871582754174f88213a488fe
|
b4f0f797b2c469e148b0330ad9d309610f1f0668
|
refs/heads/master
| 2022-11-30T11:51:30.210342
| 2020-08-17T15:00:37
| 2020-08-17T15:00:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,062
|
#!/Users/rayaan_siddiqi23/untitled50/bin/python
# -*- python -*-
# Keep this script in sync with python-config.sh.in
import getopt
import os
import sys
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'extension-suffix', 'help', 'abiflags', 'configdir']
def exit_with_usage(code=1):
print("Usage: {0} [{1}]".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)), file=sys.stderr)
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
libs = ['-lpython' + pyver + sys.abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
print(sysconfig.get_config_var('EXT_SUFFIX'))
elif opt == '--abiflags':
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"rayaan.siddiqi@gmail.com"
] |
rayaan.siddiqi@gmail.com
|
|
63cff8ecff4e85addb997ed55491b924444e50ac
|
e1c7c25c22c2f854aa8e3d8f6fffdf80a0b4dfbf
|
/CodeChef/CodeChef_Contests/Distribute_Candies.py
|
b7374b54f77afa5e0246f35539e5cff21c1a8d76
|
[] |
no_license
|
roshan13ghimire/Competitive_Programming
|
efc85f9fe6fa46edff96931ca3a1cca78628918b
|
0c238a391c6acee8763968ef298b765c133b7111
|
refs/heads/master
| 2023-04-15T16:35:07.711491
| 2021-04-12T03:00:05
| 2021-04-12T03:00:05
| 273,682,360
| 4
| 1
| null | 2020-08-05T02:11:53
| 2020-06-20T09:59:57
|
Python
|
UTF-8
|
Python
| false
| false
| 344
|
py
|
#Distribute_Candies
for _ in range(int(input())):
n = int(input())
a=list(map(int,input().split()))
    if(len(a)==1):
        print(a[0])
    elif(len(a)==2):
        print(min(a))
else:
a.sort(reverse=True)
s=0
for i in range(2,len(a),3):
s +=a [i]
print(s)
|
[
"noreply@github.com"
] |
roshan13ghimire.noreply@github.com
|
ab718c6751938719fe2a3fad3cb11077d9a8ee9a
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/sql/v20201101preview/get_job_credential.py
|
78541af0a1ae2757fdc71c031ccd8aba7355f0fc
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 3,672
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetJobCredentialResult',
'AwaitableGetJobCredentialResult',
'get_job_credential',
]
@pulumi.output_type
class GetJobCredentialResult:
"""
A stored credential that can be used by a job to connect to target databases.
"""
def __init__(__self__, id=None, name=None, type=None, username=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if username and not isinstance(username, str):
raise TypeError("Expected argument 'username' to be a str")
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def username(self) -> str:
"""
The credential user name.
"""
return pulumi.get(self, "username")
class AwaitableGetJobCredentialResult(GetJobCredentialResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetJobCredentialResult(
id=self.id,
name=self.name,
type=self.type,
username=self.username)
def get_job_credential(credential_name: Optional[str] = None,
job_agent_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJobCredentialResult:
"""
A stored credential that can be used by a job to connect to target databases.
:param str credential_name: The name of the credential.
:param str job_agent_name: The name of the job agent.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
__args__ = dict()
__args__['credentialName'] = credential_name
__args__['jobAgentName'] = job_agent_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:sql/v20201101preview:getJobCredential', __args__, opts=opts, typ=GetJobCredentialResult).value
return AwaitableGetJobCredentialResult(
id=__ret__.id,
name=__ret__.name,
type=__ret__.type,
username=__ret__.username)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
bfbdd12c0144be46d6a9f9cd9681526f91cb2302
|
b24e993bfae0e530b7c6ee676b0efa1b2cbea33c
|
/rsopt/run.py
|
0b53f55ed903b0c68a87716fab0c306be408db6a
|
[
"Apache-2.0"
] |
permissive
|
tanxicccc/rsopt
|
f99d8d721ce37647717b41c08b44f69a065444ae
|
8705e937f95a4bbe6ed3fb1a04b78f724a5f3931
|
refs/heads/master
| 2023-01-06T19:21:40.065806
| 2020-10-24T23:48:34
| 2020-10-24T23:48:34
| 288,584,476
| 0
| 0
|
Apache-2.0
| 2020-08-18T23:19:55
| 2020-08-18T23:19:54
| null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
# Functions to run a call from originating from pkcli
# This is just a temporary setup. libEnsembleOptimizer shouldn't actually be tied to execution mode
# It is instantiated because nlopt was requested
# THe executor will be setup separately based off 'execution_type' in YAML and registered with libEnsembleOptimizer
from rsopt.libe_tools.optimizer import libEnsembleOptimizer
def serial(config):
opt = libEnsembleOptimizer()
opt.load_configuration(config)
return opt #.run()
# These names have to line up with accepted values for setup.execution_type
# Another place where shared names are imported from common source
run_modes = {
'serial': serial
}
|
[
"chall@radiasoft.net"
] |
chall@radiasoft.net
|
d5e3cb808796ca54150f2786b23de7ecf5ca0361
|
5f57cfb662e7a490235255273114d2eb712a9ce4
|
/djd-prog2/manha-aula2/operadores2.py
|
8f487d67a5131274c9bbe4910aa05cb36e8f139f
|
[] |
no_license
|
antoniorcn/fatec-2020-2s
|
762e14acfbf0cb42a2e478662e6cf0001794f72c
|
4d90cc35d354382ad38c20ce2e32924216d7d747
|
refs/heads/master
| 2023-01-27T22:52:29.529360
| 2020-12-05T13:51:30
| 2020-12-05T13:51:30
| 289,972,977
| 9
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
temperatura = 14
frio = temperatura < 15
horas_acordado = 18
sono = horas_acordado > 16
ficar_casa = sono and frio
print("Ficar em casa: ", ficar_casa)
|
[
"antoniorcn@hotmail.com"
] |
antoniorcn@hotmail.com
|
b9b986b188c838e28b4a55d6dad2d567a40a0153
|
616c7325ee8c4a7d37467a8ad760d00891d3b450
|
/get_linkedin.py
|
41f290be48e6b44cc8360af16080bdcde11e899f
|
[] |
no_license
|
vintasoftware/networking.cool
|
c8f7b3d027b098966ea4c47a211a465eaf746661
|
314d3b32ab3d611fe2b622d57372dc29fb5eac5e
|
refs/heads/master
| 2023-06-19T19:38:03.557182
| 2015-11-15T19:05:08
| 2015-11-15T19:05:08
| 64,401,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
# coding: utf-8
from selenium.webdriver import Chrome
from selenium.common.exceptions import WebDriverException
import requests
from retrying import retry
import json
import time
import pprint
import traceback
def wait_for(fn, timeout=5):
start_time = time.time()
while time.time() - start_time < timeout:
try:
return fn()
except WebDriverException:
time.sleep(0.1)
# one more try, which will raise any errors if they are outstanding
return fn()
def find_by_css(selector):
def _find():
return driver.find_element_by_css_selector(selector)
return wait_for(_find)
def find_linkedin_url(term):
params = {
'key': 'AIzaSyBs_qRMdd3UxIp6HQ9zMidKupXkRCtmZrQ',
'cx': '007098471526848933106:c_yqy7e87hi',
'q': term
}
r = requests.get(
'https://www.googleapis.com/customsearch/v1', params=params)
r_json = r.json()
for item in r_json['items']:
if 'pagemap' in item and 'person' in item['pagemap']:
return item['link']
@retry(stop_max_delay=6000)
def get_contact_linkedin_html(name, company):
term = u"{} {}".format(name, company)
url = find_linkedin_url(term)
if not url:
raise ValueError(u"No result for {}".format(term))
driver.get(url)
return find_by_css('#profile').get_attribute('innerHTML'), url
driver = Chrome()
driver.set_window_size(1280, 700)
results = []
def main():
with open('attendees.json') as f:
attendees_raw = json.load(f)
attendees = [(a['name'], a['info2']) for a in attendees_raw
if 'info2' in a]
for name, company in attendees:
try:
html, link = get_contact_linkedin_html(name, company)
except Exception:
            traceback.print_exc()
html = None
link = None
results.append({
'name': name,
'company': company,
'html': html,
'link': link
})
print len(results)
with open('results.json', 'w+') as out:
json.dump(results, out, indent=2)
driver.quit()
if __name__ == '__main__':
start_time = time.time()
main()
print("--- ran in %s seconds ---" % (time.time() - start_time))
|
[
"flaviojuvenal@gmail.com"
] |
flaviojuvenal@gmail.com
|
e4c28f5e3f4811b1f754c5f93c0fe9abaa34c559
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R3/benchmark/startPyquil415.py
|
dc4c420c9e6301077dbf98ee6a6da1cfc57977c0
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
# qubit number=4
# total number=11
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += X(1) # number=8
prog += H(3) # number=4
prog += Y(3) # number=5
prog += X(3) # number=7
prog += CNOT(3,0) # number=9
prog += CNOT(3,0) # number=10
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil415.csv","w")
    print(summarise_results(bitstrings),file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
6c8f1633699688ed4eac61c01aefb521fce09724
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/tensorflow/lite/python/interpreter 2.py
|
c71daaf1bb4739a1074055c116eca8825cfc74e0
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:7ecbc5673643005289606d082d863298698c13aa95073050a67828c541faabde
size 33756
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
45661f2da45c893dd1a6b2204c6601008132cdfd
|
f079cce1a4f5d1e43ac5d6d83307609577086dd7
|
/tests/test_awesome_cli.py
|
7f1f9062ff5facf4788bfeb21532131691b9059d
|
[
"CC-BY-4.0"
] |
permissive
|
moraesmv/awesome-aws
|
0ee446cba83bb9d3201af5cc496333319b3ea65b
|
b3bac0395f23901d5338d4c036076307ce8c73b1
|
refs/heads/master
| 2020-05-31T00:09:26.761636
| 2015-12-28T13:19:48
| 2015-12-28T13:19:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Creative Commons Attribution 4.0 International License (CC BY 4.0)
# http://creativecommons.org/licenses/by/4.0/
import unittest
from click.testing import CliRunner
from awesome.awesome_cli import AwesomeCli
class AwesomeCliTest(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
self.awesome_cli = AwesomeCli()
|
[
"donne.martin@gmail.com"
] |
donne.martin@gmail.com
|
3128a7eec6212fb006adc2f806bb0d2303192523
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetcodePythonProject_with_solution/leetcode_0351_0400/LeetCode358_RearrangeStringKDistanceApart.py
|
596860dc901240f03530a0afafccc23be9414195
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
'''
Created on Mar 23, 2017
@author: MT
'''
import heapq
class Solution(object):
    def rearrangeString(self, s, k):
        hashmap = {}
        for c in s:
            hashmap[c] = hashmap.get(c, 0)+1
        heap = []
        for c, freq in hashmap.items():
            heapq.heappush(heap, [-freq, c])
        queue = []
        res = []
        while heap:
            freq, c = heapq.heappop(heap)
            res.append(c)
            queue.append([freq, c])
            if len(queue) < k:
                continue
            freq, c = queue.pop(0)
            freq = -freq-1
            if freq > 0:
                heapq.heappush(heap, [-freq, c])
        return ''.join(res) if len(res) == len(s) else ''
    def test(self):
        testCases = [
            ('aabbcc', 3),
            ('aaabc', 3),
            ('aaadbbcc', 2),
        ]
        for s, k in testCases:
            print('s: %s' % (s))
            print('k: %s' % (k))
            result = self.rearrangeString(s, k)
            print('result: %s' % (result))
            print('-='*20+'-')
if __name__ == '__main__':
    Solution().test()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
a92affe8b0afb78a1c8610adfff9fe2d407ddb83
|
735a315ea82893f2acd5ac141f1a9b8be89f5cb9
|
/pylib/mdsplus_alpha/tests/segmentsUnitTest.py
|
67b013c0f7cb3cedebc0e7516d4af242ca439150
|
[] |
no_license
|
drsmith48/pppl-mdsplus-python
|
5ce6f7ccef4a23ea4b8296aa06f51f3a646dd36f
|
0fb5100e6718c8c10f04c3aac120558f521f9a59
|
refs/heads/master
| 2021-07-08T02:29:59.069616
| 2017-10-04T20:17:32
| 2017-10-04T20:17:32
| 105,808,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from unittest import TestCase,TestSuite
from tree import Tree
from mdsdata import *
from mdsscalar import *
from mdsarray import *
import numpy as np
import random
import os
import sys
import tempfile
_tmpdir=tempfile.mkdtemp()
def setUpModule():
pass
def tearDownModule():
import shutil
shutil.rmtree(_tmpdir)
class segmentsTests(TestCase):
def setUp(self):
os.environ["seg_tree_path"]=_tmpdir
def tearDown(self):
pass
def arrayDimensionOrder(self):
ptree=Tree('seg_tree',-1,'NEW')
ptree.addNode('IMM')
ptree.write()
ptree=Tree('seg_tree',-1)
ptree.createPulse(1)
ptree=Tree('seg_tree',1)
node=ptree.getNode('IMM')
        WIDTH = 640
        HEIGHT = 480
        currFrame = np.zeros(WIDTH*HEIGHT, dtype=np.int16)
        currTime = float(0)
for i in range(0,WIDTH):
for j in range(0,HEIGHT):
currFrame[i*HEIGHT+j]=random.randint(0,255)
currTime = float(0)
startTime = Float32(currTime)
endTime = Float32(currTime)
dim = Float32Array(currTime)
segment = Int16Array(currFrame)
segment.resize([1,HEIGHT,WIDTH])
shape = segment.getShape()
node.makeSegment(startTime, endTime, dim, segment)
retShape = node.getShape()
self.assertEqual(shape[0],retShape[0])
self.assertEqual(shape[1],retShape[1])
self.assertEqual(shape[2],retShape[2])
def runTest(self):
self.arrayDimensionOrder()
def suite():
tests = ['arrayDimensionOrder']
return TestSuite(map(segmentsTests,tests))
|
[
"drsmith8@wisc.edu"
] |
drsmith8@wisc.edu
|
21b3ef57b8af5a6359865fc56f601d779e0ab0cb
|
27ff8115b114f5a78a0f4c9d1a4981df43d5beb6
|
/Matplotlib/demo_00Mofandemo/19_animation_demo.py
|
e0fed7f44984f26b0ebb7667146806704055d518
|
[
"MIT"
] |
permissive
|
Asurada2015/Python-Data-Analysis-Learning-Notes
|
3da937504bc996c273da76b78baa814da3c2bc31
|
5697c8de3e5fd6562e77195b198b2d8ff836008e
|
refs/heads/master
| 2021-06-26T01:08:00.788099
| 2020-04-08T07:46:49
| 2020-04-08T07:46:49
| 97,900,161
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
fig, ax = plt.subplots()
# Create a figure with a subplot; equivalent to fig = plt.figure() followed by
# ax = fig.add_subplot(), where add_subplot's arguments split the canvas.
# For example, fig.add_subplot(2, 2, 2) splits the canvas into a 2x2 grid and
# draws ax in the 2nd cell, counting row-first.
x = np.arange(0, 2*np.pi, 0.01)  # sample a point every 0.01 from 0 to 2*pi
line, = ax.plot(x, np.sin(x))  # note the trailing comma: ax.plot returns a one-element tuple
# print(type(line))
# print(type((line,)))
# <class 'matplotlib.lines.Line2D'>
# <class 'tuple'>
def animate(i):  # i is the frame index 0-99 supplied via frames; it drives each redraw
    # print(i)  # 0-99
    line.set_ydata(np.sin(x + i/10.0))  # update the line's y values
    return line,
def init():  # initialization: the state the plot starts in
    line.set_ydata(np.sin(x))
    return line,
ani = animation.FuncAnimation(fig=fig, func=animate, frames=100, init_func=init,
                              interval=100, blit=False)
"""frames sets the number of frames: after 100 calls to animate() the animation
loops, and the frame index is also passed into animate() as its argument.
init_func sets the initial image. interval is the delay between updates in
milliseconds (100 ms here). Set blit=True to redraw only the pixels that
changed, or False to redraw the whole frame on every update.
"""
plt.show()
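# To save the animation instead of (or as well as) showing it interactively,
# something like the following should work (an illustrative sketch, not part
# of the original file; it needs the pillow writer installed):
# ani.save('sine_wave.gif', writer='pillow', fps=10)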
|
[
"1786546913@qq.com"
] |
1786546913@qq.com
|
57cca23e4498f672ba7e88f78c1b7c23e7a610ed
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/bam/task_specific/task.py
|
8d63fc0feb22d7b92605c5e9906677fe480308dc
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,395
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import csv
import os
import tensorflow.compat.v1 as tf
class Example(object):
__metaclass__ = abc.ABCMeta
def __init__(self, task_name):
self.task_name = task_name
class Task(object):
"""Override this class to add a new task."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, long_sequences=False):
self.config = config
self.name = name
self.long_sequences = long_sequences
def get_examples(self, split):
return self.load_data(split + ".tsv", split)
def get_test_splits(self):
return ["test"]
def load_data(self, fname, split):
examples = self._create_examples(
read_tsv(os.path.join(self.config.raw_data_dir(self.name), fname),
max_lines=50 if self.config.debug else None),
split)
return examples
@abc.abstractmethod
def _create_examples(self, lines, split):
pass
@abc.abstractmethod
def get_scorer(self):
pass
@abc.abstractmethod
def get_feature_specs(self):
pass
@abc.abstractmethod
def featurize(self, example, is_training):
pass
@abc.abstractmethod
def get_prediction_module(self, bert_model, features, is_training,
percent_done):
pass
def __repr__(self):
return "Task(" + self.name + ")"
def read_tsv(input_file, quotechar=None, max_lines=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for i, line in enumerate(reader):
if max_lines and i >= max_lines:
break
lines.append(line)
return lines
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
039644bfddd186986c491626e0b7127322ccbf58
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py
|
162e6a51da5795a3b797f3692d41a13ade964552
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,791
|
py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import abc
import logging
import time
from typing import Any, Optional
from azure.core.credentials import AccessToken
from ..._constants import DEFAULT_REFRESH_OFFSET, DEFAULT_TOKEN_REFRESH_RETRY_DELAY
from ..._internal import within_credential_chain
_LOGGER = logging.getLogger(__name__)
class GetTokenMixin(abc.ABC):
def __init__(self, *args, **kwargs) -> None:
self._last_request_time = 0
# https://github.com/python/mypy/issues/5887
super(GetTokenMixin, self).__init__(*args, **kwargs) # type: ignore
@abc.abstractmethod
async def _acquire_token_silently(self, *scopes: str, **kwargs) -> Optional[AccessToken]:
"""Attempt to acquire an access token from a cache or by redeeming a refresh token.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
For more information about scopes, see
https://learn.microsoft.com/azure/active-directory/develop/scopes-oidc.
:return: An access token with the desired scopes if successful; otherwise, None.
:rtype: ~azure.core.credentials.AccessToken or None
"""
@abc.abstractmethod
async def _request_token(self, *scopes: str, **kwargs) -> AccessToken:
"""Request an access token from the STS.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
For more information about scopes, see
https://learn.microsoft.com/azure/active-directory/develop/scopes-oidc.
:return: An access token with the desired scopes.
:rtype: ~azure.core.credentials.AccessToken
"""
def _should_refresh(self, token: AccessToken) -> bool:
now = int(time.time())
if token.expires_on - now > DEFAULT_REFRESH_OFFSET:
return False
if now - self._last_request_time < DEFAULT_TOKEN_REFRESH_RETRY_DELAY:
return False
return True
async def get_token(
self, *scopes: str, claims: Optional[str] = None, tenant_id: Optional[str] = None, **kwargs: Any
) -> AccessToken:
"""Request an access token for `scopes`.
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
For more information about scopes, see
https://learn.microsoft.com/azure/active-directory/develop/scopes-oidc.
:keyword str claims: additional claims required in the token, such as those returned in a resource provider's
claims challenge following an authorization failure.
:keyword str tenant_id: optional tenant to include in the token request.
:keyword bool enable_cae: indicates whether to enable Continuous Access Evaluation (CAE) for the requested
token. Defaults to False.
:return: An access token with the desired scopes.
:rtype: ~azure.core.credentials.AccessToken
:raises CredentialUnavailableError: the credential is unable to attempt authentication because it lacks
required data, state, or platform support
:raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message``
attribute gives a reason.
"""
if not scopes:
raise ValueError('"get_token" requires at least one scope')
try:
token = await self._acquire_token_silently(*scopes, claims=claims, tenant_id=tenant_id, **kwargs)
if not token:
self._last_request_time = int(time.time())
token = await self._request_token(*scopes, claims=claims, tenant_id=tenant_id, **kwargs)
elif self._should_refresh(token):
try:
self._last_request_time = int(time.time())
token = await self._request_token(*scopes, claims=claims, tenant_id=tenant_id, **kwargs)
except Exception: # pylint:disable=broad-except
pass
_LOGGER.log(
logging.DEBUG if within_credential_chain.get() else logging.INFO,
"%s.get_token succeeded",
self.__class__.__name__,
)
return token
except Exception as ex:
_LOGGER.log(
logging.DEBUG if within_credential_chain.get() else logging.WARNING,
"%s.get_token failed: %s",
self.__class__.__name__,
ex,
exc_info=_LOGGER.isEnabledFor(logging.DEBUG),
)
raise
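# A minimal sketch (not part of the SDK) of a credential built on this mixin,
# reusing this module's imports; the one-hour lifetime and "fake-token" value
# are illustrative assumptions.
class StaticTestCredential(GetTokenMixin):
    async def _acquire_token_silently(self, *scopes: str, **kwargs) -> Optional[AccessToken]:
        return getattr(self, "_cached", None)  # no token until one is requested
    async def _request_token(self, *scopes: str, **kwargs) -> AccessToken:
        self._cached = AccessToken("fake-token", int(time.time()) + 3600)
        return self._cached
# Usage sketch: `await StaticTestCredential().get_token("scope")` returns the
# cached token until _should_refresh() decides it is close enough to expiry.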
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
cb8a404a6028dd2e0c857ee4f7bbbaa73b7296c9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03200/s271432048.py
|
bcd6a8482b6eb023426b8dd72faca95f03661394
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
S = list(input())
n = len(S)
cnt_b = 0
cnt = 0
for i in range(n):
if S[i] == 'B':
cnt_b += 1
cnt += i
ans = int((2*n - cnt_b -1)*cnt_b*0.5 - cnt)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
59565e2453584531a21bcde645e428597918d80b
|
e714dfd95cb74f4e357af8d085e4dcaf8b7ecdf3
|
/0x03-python-data_structures/10-divisible_by_2.py
|
2d808d5249e958464d33eb26dcdab22c4db79367
|
[] |
no_license
|
MCavigli/holbertonschool-higher_level_programming_classic
|
2cea769dc1fd39e90f6ef74cdb3191e2472b0282
|
870548f964a3deac4a41918e9c3d0bad6cd732b4
|
refs/heads/master
| 2022-03-06T09:33:56.839118
| 2019-09-27T06:04:34
| 2019-09-27T06:04:34
| 184,122,977
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
#!/usr/bin/python3
def divisible_by_2(my_list=[]):
new = []
for num in my_list:
        new.append(num % 2 == 0)
return new
|
[
"mcavigli@gmail.com"
] |
mcavigli@gmail.com
|
3034898b4c6efff6c979aa9c058eaad3bdfe78e5
|
d51b4c766661af65b4ee6e7c30f8cb4bdd8603e3
|
/python/oop/method/iter.py
|
a612e99ce5f3e8c7cc0ed0aa897554f8d66889a4
|
[] |
no_license
|
yanxurui/keepcoding
|
3e988c76b123d55b32cf7cc35fbffb12c4ccb095
|
d6b9f07e2d1437681fa77fee0687ea9b83cab135
|
refs/heads/master
| 2021-01-24T09:01:41.306597
| 2020-05-21T05:36:04
| 2020-05-21T05:36:04
| 93,400,267
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
class Spam(object):
"""[summary]
>>> for i in Spam(1, 5):
... print(i)
...
1
4
9
16
>>> # iterate manully
>>> X = Spam(1, 5)
>>> I = iter(X)
>>> next(I)
1
>>> next(I)
4
>>> next(I)
9
>>> next(I)
16
>>> next(I)
Traceback (most recent call last):
StopIteration
"""
def __init__(self, start, stop):
self.value = start
self.stop = stop
def __iter__(self):
return self
# use `def __next__(self)` in python3
def next(self):
if self.value == self.stop:
raise StopIteration
rv = self.value ** 2
self.value += 1
return rv
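# A minimal Python 3 sketch of the same class (an illustrative addition, not
# part of the original file): only the protocol method is renamed.
class Spam3(object):
    def __init__(self, start, stop):
        self.value = start
        self.stop = stop
    def __iter__(self):
        return self
    def __next__(self):  # Python 3 spelling of next()
        if self.value == self.stop:
            raise StopIteration
        rv = self.value ** 2
        self.value += 1
        return rv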
|
[
"617080352@qq.com"
] |
617080352@qq.com
|
e85fdfeeaef3f45656935aec998ebc36b9928833
|
7f3a439c6ac921f07101759e932424792ee9e24e
|
/std_lib/asyncio/p01_coroutine_task/ch02_awaitables.py
|
9d984214e2480f5b6935010ec261bc2252865bb2
|
[] |
no_license
|
multiscripter/python
|
03f6299af0ee2153e9e3b696aa591e460a3d6a52
|
eef7900b7a564e0850659fb59c1218be9c6829ea
|
refs/heads/master
| 2023-04-13T05:24:29.434394
| 2021-04-26T12:11:59
| 2021-04-26T12:11:59
| 289,573,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
import asyncio
# We say that an object is awaitable if it can be used in an await expression.
# Many asyncio APIs are designed to accept awaitable objects.
# There are three main types of awaitables: Coroutine, Task and Future.
# 1. Coroutine.
# Coroutines are awaitable, so they can be awaited from other coroutines:
async def nested():
    return 42
async def run_nested():
    print(await nested())
asyncio.run(run_nested())
# 2. Task.
# Tasks are used to schedule coroutines to run concurrently.
async def run_task_nested():
    task = asyncio.create_task(nested())
    print(await task)
asyncio.run(run_task_nested())
# 3. Future.
# A Future is a special low-level awaitable object that represents the
# eventual result of an asynchronous operation.
# When a Future object is awaited, it means the coroutine will wait until
# the Future is resolved somewhere else.
# Future objects in asyncio are needed so that callback-based code can be
# used with async/await.
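# For example (an illustrative sketch, not from the original file): a Future
# created with loop.create_future() is resolved elsewhere via set_result();
# the names set_after and run_future are assumptions.
async def set_after(fut, delay, value):
    await asyncio.sleep(delay)
    fut.set_result(value)  # resolve the Future
async def run_future():
    loop = asyncio.get_running_loop()
    fut = loop.create_future()  # low-level Future tied to the running loop
    asyncio.create_task(set_after(fut, 0.1, 42))
    print(await fut)  # suspends here until set_result() is called
asyncio.run(run_future())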
|
[
"ILL-JAH@yandex.ru"
] |
ILL-JAH@yandex.ru
|
53c448f8e4e441f92e2aec10e29d9ca24359595a
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1412+299/sdB_PG_1412+299_lc.py
|
1fc949095568bba26e541e43fa821db7addabfb7
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[213.598708,29.684953], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1412+299 /sdB_PG_1412+299_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
91f7b1e95f64d31541ee7a3ad43d81110ac198fd
|
1d38c549c07f43cc26b7353ef95300b934eeed33
|
/GUI/Edit/Specy.py
|
fb03a7097cd0a7523d4349c7642606a3422113c2
|
[] |
no_license
|
pooyagheyami/Adel3
|
a6354fbc5aa56a9c38a8b724c8d22bea689380a1
|
29e257e19fd6914de0e60c303871321e457a858b
|
refs/heads/master
| 2022-11-07T21:53:13.958369
| 2020-06-12T13:22:55
| 2020-06-12T13:22:55
| 271,803,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,690
|
py
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 17 2015)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.grid
import Database.MDataGet as DG
###########################################################################
## Class MyPanel2
###########################################################################
class MyPanel2 ( wx.Panel ):
def __init__( self, parent , txts , ccod ,stit):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 273,256 ), style = wx.TAB_TRAVERSAL )
self.SetLayoutDirection(2)
self.ccod = ccod
#print self.ccod
#print stit
self.iData = DG.GetData(u'',u'')
self.itits = self.iData.gTitel(stit)
self.ispec = self.iData.gSpcy(self.ccod)
self.row = len(self.itits)
Vsz1 = wx.BoxSizer( wx.VERTICAL )
Hsz1 = wx.BoxSizer( wx.HORIZONTAL )
self.txt1 = wx.StaticText( self, wx.ID_ANY, txts[0], wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt1.Wrap( -1 )
Hsz1.Add( self.txt1, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.fld1 = wx.TextCtrl( self, wx.ID_ANY, txts[1], wx.DefaultPosition, wx.DefaultSize, 0 )
Hsz1.Add( self.fld1, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
Vsz1.Add( Hsz1, 0, wx.EXPAND, 5 )
Hsz2 = wx.BoxSizer( wx.HORIZONTAL )
self.grid1 = wx.grid.Grid( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.STATIC_BORDER )
# Grid
self.grid1.CreateGrid( self.row, 2 )
self.grid1.EnableEditing( True )
self.grid1.EnableGridLines( True )
self.grid1.EnableDragGridSize( False )
self.grid1.SetMargins( 0, 0 )
# Columns
self.grid1.SetColSize( 0, 99 )
self.grid1.SetColSize( 1, 134 )
self.grid1.EnableDragColMove( False )
self.grid1.EnableDragColSize( True )
self.grid1.SetColLabelSize( 30 )
self.grid1.SetColLabelValue( 0, u"عنوان" )
self.grid1.SetColLabelValue( 1, u"مشخصه" )
self.grid1.SetColLabelAlignment( wx.ALIGN_CENTRE, wx.ALIGN_CENTRE )
# Rows
self.grid1.EnableDragRowSize( True )
self.grid1.SetRowLabelSize( 19 )
self.grid1.SetRowLabelAlignment( wx.ALIGN_CENTRE, wx.ALIGN_CENTRE )
# Label Appearance
# Cell Defaults
self.grid1.SetDefaultCellAlignment( wx.ALIGN_LEFT, wx.ALIGN_TOP )
Hsz2.Add( self.grid1, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND, 5 )
Vsz1.Add( Hsz2, 1, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )
Hsz3 = wx.BoxSizer( wx.HORIZONTAL )
self.btn1 = wx.Button( self, wx.ID_ANY, u"انصراف", wx.DefaultPosition, wx.DefaultSize, 0 )
Hsz3.Add( self.btn1, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.btn2 = wx.Button( self, wx.ID_ANY, u"ثبت شود", wx.DefaultPosition, wx.DefaultSize, 0 )
Hsz3.Add( self.btn2, 0, wx.ALL, 5 )
Vsz1.Add( Hsz3, 0, wx.ALIGN_CENTER_HORIZONTAL, 5 )
#self.iData = DG.GetData(u'',u'')
#self.itits = self.iData.gTitel(stit)
#self.ispec = self.iData.gSpcy(self.ccod)
self.lodtit()
self.SetSizer( Vsz1 )
self.Layout()
self.svit = False
# Connect Events
self.btn1.Bind( wx.EVT_BUTTON, self.cancl )
self.btn2.Bind( wx.EVT_BUTTON, self.aplyit )
def __del__( self ):
pass
# Virtual event handlers, overide them in your derived class
def cancl( self, event ):
self.svit = False
q = self.GetParent()
q.Close()
def aplyit( self, event ):
self.svit = True
#self.sData = DG.SetData(u'',u'')
#idata = self.gettit()
#print idata
#self.savtit(idata,self.sData)
q = self.GetParent()
q.Close()
def lodtit(self):
j = 0
for t in self.itits:
self.grid1.SetCellValue(j,0,t[0])
for s in self.ispec:
if s[1] in t:
self.grid1.SetCellValue(j,1,s[0])
j = j + 1
def gettit(self):
self.spcy = []
for i in range(len(self.itits)):
ispc = self.grid1.GetCellValue(i,1)
if ispc != '':
self.spcy.append((self.ccod,self.itits[i][1],ispc))
#print self.itits[i][1]
#print self.spcy
return self.spcy
def RetRev(self):
return self.svit
|
[
"pooyagheyami@gmail.com"
] |
pooyagheyami@gmail.com
|
41f673888cfffe4c1aa80b2a5a347f7f943800df
|
54ddb3f38cd09ac25213a7eb8743376fe778fee8
|
/topic_02_syntax/examples/isdigit_isnumeric_isdecimal.py
|
e1596f3051a547ad360699482c643e2c9d57e600
|
[] |
no_license
|
ryndovaira/leveluppythonlevel1_300321
|
dbfd4ee41485870097ee490f652751776ccbd7ab
|
0877226e6fdb8945531775c42193a90ddb9c8a8b
|
refs/heads/master
| 2023-06-06T07:44:15.157913
| 2021-06-18T11:53:35
| 2021-06-18T11:53:35
| 376,595,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
def check_str(my_str):
print(f"mystr: {my_str}")
print(f"isnumeric: {str.isnumeric(my_str)}")
print(f"isdigit: {str.isdigit(my_str)}")
print(f"isdecimal: {str.isdecimal(my_str)}")
print("-" * 50)
# isdecimal() ⊆ isdigit() ⊆ isnumeric()
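# For example (an illustrative note, not in the original file):
#   '½'   -> isnumeric True,  isdigit False, isdecimal False
#   '³'   -> isnumeric True,  isdigit True,  isdecimal False
#   '038' -> isnumeric True,  isdigit True,  isdecimal True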
if __name__ == '__main__':
check_str('½')
check_str('ⅠⅢⅧ')
check_str('⑩⑬㊿')
check_str('³')
check_str('🄀⒊⒏')
check_str('⓪③⑧')
check_str('038')
    check_str('038') # FULLWIDTH DIGIT
check_str('٠١٢٣٤') # ARABIC-INDIC DIGIT
check_str('-38')
check_str('+38')
check_str('3_8')
|
[
"ryndovaira@gmail.com"
] |
ryndovaira@gmail.com
|
2f3807cf8f62e47c63a60a47b092def214c58a97
|
524756e5e03465584dcb2d04b8092fbe8840448a
|
/users/signals.py
|
61f3f6acbaa87c5bbd5887d4336cc27d40a10557
|
[
"Apache-2.0"
] |
permissive
|
areebbeigh/greenspace-demo
|
f9a241fb6c39a724b19094ccf21114647492dd81
|
0754f3b50e845bd5e50239361239f9b0b8aba42b
|
refs/heads/master
| 2022-10-11T05:45:05.036097
| 2020-06-11T11:04:12
| 2020-06-11T11:41:38
| 271,524,789
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
import sys
from django.conf import settings
def create_groups(sender, **kwargs):
from django.contrib.auth.models import Group
group, created = Group.objects.get_or_create(name=settings.NURSERY_MGR_GROUP)
print('Created new nursery group' if created else 'Nursery group already exists')
|
[
"areebbeigh@gmail.com"
] |
areebbeigh@gmail.com
|
9ab1dae85e9196df975840e6c6d4551650fb7216
|
3adec884f06eabfe50d4ab3456123e04d02b02ff
|
/148. Sort List.py
|
a2e8d101bc501c111c899032f45427524ee85d4f
|
[] |
no_license
|
windmzx/pyleetcode
|
c57ecb855c8e560dd32cf7cf14616be2f91ba50e
|
d0a1cb895e1604fcf70a73ea1c4b1e6b283e3400
|
refs/heads/master
| 2022-10-05T17:51:08.394112
| 2020-06-09T09:24:28
| 2020-06-09T09:24:28
| 250,222,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def sortList(self, head: ListNode) -> ListNode:
        # Merge sort on a linked list: split at the midpoint found with
        # slow/fast pointers, sort each half recursively, then merge.
        def split(head):
            if head.next is None:
                return head
            p = head
            q = head
            while q is not None and q.next is not None:
                temp = p
                p = p.next
                q = q.next.next
            temp.next = None  # cut the list just before the midpoint
            l = split(head)
            r = split(p)
            return merge(l, r)
        def merge(l1, l2):
            # Standard two-way merge using a dummy head node.
            head = ListNode(-1)
            cur = head
            while l1 is not None and l2 is not None:
                if l1.val < l2.val:
                    cur.next = l1
                    l1 = l1.next
                else:
                    cur.next = l2
                    l2 = l2.next
                cur = cur.next
            if l1 is not None:
                cur.next = l1
            if l2 is not None:
                cur.next = l2
            return head.next
        if head is None:
            return None
        return split(head)
if __name__ == "__main__":
    x = Solution()
    li = ListNode(3)
    p = li
    for val in (1, 4, 0, 2, 5):
        p.next = ListNode(val)
        p = p.next
    re = x.sortList(li)
    while re is not None:
        print(re.val)
        re = re.next
|
[
"2281927774@qq.com"
] |
2281927774@qq.com
|
313e25acc001a41e971e137fbcf6bee9f8d3a909
|
8e69eee9b474587925e22413717eb82e4b024360
|
/v1.0.0.test/toontown/minigame/TwoDEnemyMgr.py
|
2b350c515aa1c976abc5e5490424ee267024aaa9
|
[
"MIT"
] |
permissive
|
TTOFFLINE-LEAK/ttoffline
|
afaef613c36dc3b70514ccee7030ba73c3b5045b
|
bb0e91704a755d34983e94288d50288e46b68380
|
refs/heads/master
| 2020-06-12T15:41:59.411795
| 2020-04-17T08:22:55
| 2020-04-17T08:22:55
| 194,348,185
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
from panda3d.core import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.DirectObject import DirectObject
from toontown.minigame import ToonBlitzGlobals
from toontown.minigame import TwoDEnemy
class TwoDEnemyMgr(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('TwoDEnemyMgr')
def __init__(self, section, enemyList):
self.section = section
self.enemyList = enemyList
self.load()
def destroy(self):
self.section = None
while len(self.enemies):
enemy = self.enemies[0]
enemy.destroy()
self.enemies.remove(enemy)
self.enemies = None
return
def load(self):
if len(self.enemyList):
self.enemiesNP = NodePath('Enemies')
self.enemiesNP.reparentTo(self.section.sectionNP)
self.enemies = []
for index in xrange(len(self.enemyList)):
enemyId = self.section.getSectionizedId(index)
suitAttribs = self.enemyList[index]
newEnemy = TwoDEnemy.TwoDEnemy(self, enemyId, suitAttribs)
newEnemy.suit.reparentTo(self.enemiesNP)
self.enemies.append(newEnemy)
def enterPlay(self, elapsedTime):
for enemy in self.enemies:
enemy.start(elapsedTime)
def exitPlay(self):
pass
def enterPause(self):
for enemy in self.enemies:
enemy.enterPause()
def exitPause(self):
for enemy in self.enemies:
enemy.exitPause()
|
[
"s0mberdemise@protonmail.com"
] |
s0mberdemise@protonmail.com
|
10e3f86ff0569d076876a2d1d0c9d815c172bf83
|
811bf15a5bad488284300b6016c8b77f1e2daf24
|
/rllib/core/testing/tf/bc_module.py
|
cca3a42f4eeb2f0f78c637b944e16f7057e4c21d
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
justinwyang/ray
|
8c102e00321e669b7e78488d38329e82195e7b58
|
6c356296a01ebf3d8ad0cab6058fb8c03ccbf8f6
|
refs/heads/master
| 2023-04-28T15:57:24.167579
| 2023-04-22T08:28:06
| 2023-04-22T08:28:06
| 172,777,393
| 0
| 0
|
Apache-2.0
| 2020-12-04T02:39:57
| 2019-02-26T19:37:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,312
|
py
|
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Any, Mapping
from ray.rllib.core.rl_module.rl_module import RLModule, RLModuleConfig
from ray.rllib.core.rl_module.marl_module import (
MultiAgentRLModule,
MultiAgentRLModuleConfig,
)
from ray.rllib.core.rl_module.tf.tf_rl_module import TfRLModule
from ray.rllib.core.models.specs.typing import SpecType
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.nested_dict import NestedDict
class DiscreteBCTFModule(TfRLModule):
def __init__(self, config: RLModuleConfig) -> None:
super().__init__(config)
def setup(self):
input_dim = self.config.observation_space.shape[0]
hidden_dim = self.config.model_config_dict["fcnet_hiddens"][0]
output_dim = self.config.action_space.n
layers = []
layers.append(tf.keras.Input(shape=(input_dim,)))
layers.append(tf.keras.layers.ReLU())
layers.append(tf.keras.layers.Dense(hidden_dim))
layers.append(tf.keras.layers.ReLU())
layers.append(tf.keras.layers.Dense(output_dim))
self.policy = tf.keras.Sequential(layers)
self._input_dim = input_dim
@override(RLModule)
def output_specs_exploration(self) -> SpecType:
return ["action_dist"]
@override(RLModule)
def output_specs_inference(self) -> SpecType:
return ["action_dist"]
@override(RLModule)
def output_specs_train(self) -> SpecType:
return ["action_dist"]
@override(RLModule)
def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]:
obs = batch[SampleBatch.OBS]
action_logits = self.policy(obs)
action_logits_inference = tf.argmax(action_logits, axis=-1)
action_dist = tfp.distributions.Deterministic(action_logits_inference)
return {"action_dist": action_dist}
@override(RLModule)
def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]:
return self._forward_inference(batch)
@override(RLModule)
def _forward_train(self, batch: NestedDict) -> Mapping[str, Any]:
obs = batch[SampleBatch.OBS]
action_logits = self.policy(obs)
action_dist = tfp.distributions.Categorical(logits=action_logits)
return {"action_dist": action_dist}
@override(RLModule)
def get_state(self) -> Mapping[str, Any]:
return {"policy": self.policy.get_weights()}
@override(RLModule)
def set_state(self, state: Mapping[str, Any]) -> None:
self.policy.set_weights(state["policy"])
class BCTfRLModuleWithSharedGlobalEncoder(TfRLModule):
def __init__(self, encoder, local_dim, hidden_dim, action_dim):
super().__init__()
self.encoder = encoder
self.policy_head = tf.keras.Sequential(
[
tf.keras.layers.Dense(
hidden_dim + local_dim,
input_shape=(hidden_dim + local_dim,),
activation="relu",
),
tf.keras.layers.Dense(hidden_dim, activation="relu"),
tf.keras.layers.Dense(action_dim),
]
)
@override(RLModule)
def _default_input_specs(self):
return [("obs", "global"), ("obs", "local")]
@override(RLModule)
def _forward_inference(self, batch):
return self._common_forward(batch)
@override(RLModule)
def _forward_exploration(self, batch):
return self._common_forward(batch)
@override(RLModule)
def _forward_train(self, batch):
return self._common_forward(batch)
def _common_forward(self, batch):
obs = batch["obs"]
global_enc = self.encoder(obs["global"])
policy_in = tf.concat([global_enc, obs["local"]], axis=-1)
action_logits = self.policy_head(policy_in)
return {"action_dist": tf.distributions.Categorical(logits=action_logits)}
class BCTfMultiAgentModuleWithSharedEncoder(MultiAgentRLModule):
def __init__(self, config: MultiAgentRLModuleConfig) -> None:
super().__init__(config)
def setup(self):
# constructing the global encoder based on the observation_space of the first
# module
module_specs = self.config.modules
module_spec = next(iter(module_specs.values()))
global_dim = module_spec.observation_space["global"].shape[0]
hidden_dim = module_spec.model_config_dict["fcnet_hiddens"][0]
shared_encoder = tf.keras.Sequential(
[
tf.keras.Input(shape=(global_dim,)),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(hidden_dim),
]
)
for module_id, module_spec in module_specs.items():
self._rl_modules[module_id] = module_spec.module_class(
encoder=shared_encoder,
local_dim=module_spec.observation_space["local"].shape[0],
hidden_dim=hidden_dim,
action_dim=module_spec.action_space.n,
)
def serialize(self):
# TODO (Kourosh): Implement when needed.
raise NotImplementedError
def deserialize(self, data):
# TODO (Kourosh): Implement when needed.
raise NotImplementedError
|
[
"noreply@github.com"
] |
justinwyang.noreply@github.com
|
553b046fb1e362b9966b86843cf88a190f17cb17
|
544be854639d58b111c345a55597b0580e8106d8
|
/example/admin.py
|
77862791f8be12447236b147803759aedf1c3d46
|
[] |
no_license
|
arineto/django-multi-tenant-example
|
152abde09e5362e0ecaab9402d03aa23228f4bf3
|
ac74c1fa5ed3a4e3e70758c84398bcc0ec061b30
|
refs/heads/master
| 2021-01-20T20:44:45.315471
| 2016-07-11T13:48:09
| 2016-07-11T13:48:09
| 63,065,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
from django.contrib import admin
from .models import Item
from multi_tenant.models import Theme
from multi_tenant.models import Tenant
# Register your models here.
admin.site.register(Item)
admin.site.register(Theme)
admin.site.register(Tenant)
|
[
"arineto30@gmail.com"
] |
arineto30@gmail.com
|
a35078d691fddc504f227125b99e73b18a3a6ad3
|
8f61d6ae3a80eb6c6d45aab55d9e73df402446fe
|
/kate3/logger/admin.py
|
39df6a9788b2b169f0b040a71d1f3ca4d4fa7602
|
[
"MIT"
] |
permissive
|
katemsu/kate_website
|
2047314598e215b0e8b3d3d71b21b4c70df36213
|
9e6912156fe7ce07a13f54009ff1823b3558784d
|
refs/heads/master
| 2021-01-16T20:25:16.264407
| 2013-11-02T20:14:40
| 2013-11-02T20:14:40
| 14,073,589
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from django.contrib import admin
from logger.models import Entry
class EntryAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'action', 'created_at',)
list_filter = ('action', 'created_at',)
admin.site.register(Entry, EntryAdmin)
|
[
"smizell@gmail.com"
] |
smizell@gmail.com
|
04b264c1c72261ff9d515d75793a43e17fa06c3b
|
c223a3a88aad65fd48cef0d5cc40db5bf383033a
|
/mopidy/utils/process.py
|
11dafa8af19df78bf86ed1a1de01a867e4361409
|
[
"Apache-2.0"
] |
permissive
|
bok/mopidy
|
4cfb19a7c55bad2f45f57c76ec9db550a32aaf27
|
71d791291b83728f38a4e401a0c68180f403b6a9
|
refs/heads/master
| 2020-12-25T00:49:54.009159
| 2011-01-22T13:33:47
| 2011-01-22T13:33:47
| 1,455,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,334
|
py
|
import logging
import multiprocessing
import multiprocessing.dummy
from multiprocessing.reduction import reduce_connection
import pickle
import gobject
gobject.threads_init()
from mopidy import SettingsError
logger = logging.getLogger('mopidy.utils.process')
def pickle_connection(connection):
return pickle.dumps(reduce_connection(connection))
def unpickle_connection(pickled_connection):
# From http://stackoverflow.com/questions/1446004
(func, args) = pickle.loads(pickled_connection)
return func(*args)
class BaseProcess(multiprocessing.Process):
def __init__(self, core_queue):
super(BaseProcess, self).__init__()
self.core_queue = core_queue
def run(self):
logger.debug(u'%s: Starting process', self.name)
try:
self.run_inside_try()
except KeyboardInterrupt:
logger.info(u'Interrupted by user')
self.exit(0, u'Interrupted by user')
except SettingsError as e:
logger.error(e.message)
self.exit(1, u'Settings error')
except ImportError as e:
logger.error(e)
self.exit(2, u'Import error')
except Exception as e:
logger.exception(e)
self.exit(3, u'Unknown error')
def run_inside_try(self):
raise NotImplementedError
def destroy(self):
self.terminate()
def exit(self, status=0, reason=None):
self.core_queue.put({'to': 'core', 'command': 'exit',
'status': status, 'reason': reason})
self.destroy()
class BaseThread(multiprocessing.dummy.Process):
def __init__(self, core_queue):
super(BaseThread, self).__init__()
self.core_queue = core_queue
# No thread should block process from exiting
self.daemon = True
def run(self):
logger.debug(u'%s: Starting thread', self.name)
try:
self.run_inside_try()
except KeyboardInterrupt:
logger.info(u'Interrupted by user')
self.exit(0, u'Interrupted by user')
except SettingsError as e:
logger.error(e.message)
self.exit(1, u'Settings error')
except ImportError as e:
logger.error(e)
self.exit(2, u'Import error')
except Exception as e:
logger.exception(e)
self.exit(3, u'Unknown error')
def run_inside_try(self):
raise NotImplementedError
def destroy(self):
pass
def exit(self, status=0, reason=None):
self.core_queue.put({'to': 'core', 'command': 'exit',
'status': status, 'reason': reason})
self.destroy()
class GObjectEventThread(BaseThread):
"""
A GObject event loop which is shared by all Mopidy components that uses
libraries that need a GObject event loop, like GStreamer and D-Bus.
Should be started by Mopidy's core and used by
:mod:`mopidy.output.gstreamer`, :mod:`mopidy.frontend.mpris`, etc.
"""
def __init__(self, core_queue):
super(GObjectEventThread, self).__init__(core_queue)
self.name = u'GObjectEventThread'
self.loop = None
def run_inside_try(self):
self.loop = gobject.MainLoop().run()
def destroy(self):
self.loop.quit()
super(GObjectEventThread, self).destroy()
|
[
"stein.magnus@jodal.no"
] |
stein.magnus@jodal.no
|
e75f3424076f2e5d15b48b4dd8a7d95a4cab2052
|
4d99350a527a88110b7bdc7d6766fc32cf66f211
|
/OpenGLCffi/EGL/EXT/EXT/stream_consumer_egloutput.py
|
92220cad6445f2adbe03b6b49917df998efe4191
|
[
"MIT"
] |
permissive
|
cydenix/OpenGLCffi
|
e790ef67c2f6c9877badd5c38b7d58961c8739cd
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
refs/heads/master
| 2021-01-11T07:31:10.591188
| 2017-04-17T11:04:55
| 2017-04-17T11:04:55
| 80,312,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from OpenGLCffi.EGL import params
@params(api='egl', prms=['dpy', 'stream', 'layer'])
def eglStreamConsumerOutputEXT(dpy, stream, layer):
pass
|
[
"cdenizol@gmail.com"
] |
cdenizol@gmail.com
|
fdd1c5c9ed081efbfccfd7332b2d1dc9d7131567
|
e1b8ae703c84f6a06dd3a3072cfa9afb7f9ebce7
|
/settings/base.py
|
1efd5df312d1c81f3a2de70992ff31811d366c08
|
[] |
no_license
|
Code-Institute-Submissions/renanclothestore
|
95a2a161f0f0046e328cb639a88ddaf6afaceae5
|
ea295d1643b06a1f5cdbdbafcdbe767d2c286648
|
refs/heads/master
| 2020-03-26T12:34:21.946183
| 2018-08-13T21:40:09
| 2018-08-13T21:40:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,640
|
py
|
"""
Django settings for carapp project.
Generated by 'django-admin startproject' using Django 1.11.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'rwl@k11m(xzyh+&^6=#k36hkev0)7s14n1mi7j6$!a9d7o$dh%'
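# A minimal hardening sketch (not in the original project): read the key from
# the environment instead of hard-coding it, e.g.
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)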
ALLOWED_HOSTS = ["renanclothestore.herokuapp.com", "127.0.0.1", "localhost"]
SITE_ID = 4
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django_forms_bootstrap',
'paypal.standard.ipn',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
'carstore.apps.CarstoreConfig',
'accounts',
'paypal_store',
'products',
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'accounts.backends.EmailAuth',
)
LOGIN_URL = '/login/'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
]
ROOT_URLCONF = 'carapp.urls'
AUTH_USER_MODEL = 'accounts.User'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'carapp.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
[
"renanzabeu@yahoo.it"
] |
renanzabeu@yahoo.it
|
6588bb3ceecc40597d34c01833a1ac05336e8806
|
f1961c86e6da14f35c21d7235f4fc8a89fabdcad
|
/DailyProgrammer/DP20130530B.py
|
294d74df0d02a55cccef69c8006472e34b7d94ec
|
[
"MIT"
] |
permissive
|
DayGitH/Python-Challenges
|
d4930bdd85cd1a977d8f6192775ca956a375fcde
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
refs/heads/master
| 2021-01-17T13:01:03.784523
| 2018-06-29T23:49:04
| 2018-06-29T23:49:04
| 58,497,683
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,729
|
py
|
"""
[05/30/13] Challenge #126 [Intermediate] Perfect P'th Powers
https://www.reddit.com/r/dailyprogrammer/comments/1fcpnx/053013_challenge_126_intermediate_perfect_pth/
# [](#IntermediateIcon) *(Intermediate)*: Perfect P'th Powers
An integer X is a "perfect square power" if there is some integer Y such that Y^2 = X. An integer X is a "perfect cube
power" if there is some integer Y such that Y^3 = X. We can extrapolate this where P is the power in question: an
integer X is a "perfect p'th power" if there is some integer Y such that Y^P = X.
Your goal is to find the highest value of P for a given X such that for some unknown integer Y, Y^P should equal X. You
can expect the given input integer X to be within the range of an unsigned 32-bit integer (0 to 4,294,967,295).
*Special thanks to the ACM collegiate programming challenges group for giving me the initial idea
[here](http://uva.onlinejudge.org/index.php?option=onlinejudge&page=show_problem&problem=1563).*
# Formal Inputs & Outputs
## Input Description
You will be given a single integer on a single line of text through standard console input. This integer will range
from 0 to 4,294,967,295 (the limits of a 32-bit unsigned integer).
## Output Description
You must print out to standard console the highest value P that fits the above problem description's requirements.
# Sample Inputs & Outputs
## Sample Input
*Note:* These are all considered separate input examples.
17
1073741824
25
## Sample Output
*Note:* The string following the result are notes to help with understanding the example; it is NOT expected of you to
write this out.
1 (17^1)
30 (2^30)
2 (5^2)
"""
def main():
pass
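# main() is left as a stub in the source; the following is a minimal sketch of
# one possible approach (an assumption, not the author's solution): for each
# candidate base y, count how many times x divides down to exactly 1.
def highest_power(x):
    if x in (0, 1):
        return 1  # degenerate inputs; treated as a first power here
    best = 1
    y = 2
    while y * y <= x:
        v, p = x, 0
        while v % y == 0:
            v //= y
            p += 1
        if v == 1 and p > best:  # x is exactly y**p
            best = p
        y += 1
    return best
# highest_power(17) -> 1; highest_power(1073741824) -> 30; highest_power(25) -> 2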
if __name__ == "__main__":
main()
|
[
"akber91@gmail.com"
] |
akber91@gmail.com
|
76789d8e347ae1d7769b0f4e7cb04bbb19e6ff03
|
8478b0b08ebdd284740d23eb9347640b97a3d93f
|
/rttl.py
|
74fd9d49e54edaa63a3155581b1515c1d0d3477e
|
[
"MIT"
] |
permissive
|
koxa22/Symbian-Python-Files
|
4278fd4e25ba37cbd649c8ce07a3dd40f1d4213d
|
59d5c5ad0ea5c7c4ac28b1e1ee6443ed2d354502
|
refs/heads/master
| 2022-07-17T05:36:09.351149
| 2018-04-01T17:18:12
| 2018-04-01T17:18:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,751
|
py
|
# http://www.pymcu.com/PlayingSounds.html
# rttl.py version 1.0.1 (ported version to pyS60)
#
import e32
import pitchy
# RTTL variable to hold RTTL song
#RTTL = 'Bond:d=4,o=5,b=50:32p,16c#6,32d#6,32d#6,16d#6,8d#6,16c#6,16c#6,16c#6,16c#6,32e6,32e6,16e6,8e6,16d#6,16d#6,16d#6,16c#6,32d#6,32d#6,16d#6,8d#6,16c#6,16c#6,16c#6,16c#6,32e6,32e6,16e6,8e6,16d#6,16d6,16c#6,16c#7,c.7,16g#6,16f#6,g#.6'
# A Dictionary that contains the frequencies for each note
noteFreq = {'p':0,'a':3520,'a#':3729,'b':3951,'c':4186,'c#':4435,'d':4699,'d#':4978,'e':5274,'f':5588,'f#':5920,'g':6272,'g#':6645}
# This function will return the default Duration, Octave, and BeatsPerMinute from the RTTL data
def dob(dobData):
dobVals = dobData.split(',')
defaultDur = int(dobVals[0].split('=')[1])
defaultOct = int(dobVals[1].split('=')[1])
defaultBeats = int(dobVals[2].split('=')[1])
return defaultDur, defaultOct, defaultBeats
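# e.g. dob('d=4,o=5,b=50') returns (4, 5, 50)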
# This function will return the duration per note from the RTTL note data
def durSplit(noteData):
for d in noteData:
if ord(d) >= 97 and ord(d) <= 122:
durSplit = noteData.split(d)
if len(durSplit[0]) > 0:
return int(durSplit[0])
return 0
# This function will return just the note for dictionary look up from the RTTL note data
def noteSplit(noteData):
note = ''
hasDot = False
for d in noteData:
if ord(d) >= 97 and ord(d) <= 122:
note += d
if ord(d) == 35:
note += d
if ord(d) == 46:
hasDot = True
return note, hasDot
# This function will return per note octave changes if specified in the RTTL note data
def noteOctave(noteData):
    if ord(noteData[len(noteData)-1]) >= 53 and ord(noteData[len(noteData)-1]) <= 56: # ASCII '5'..'8', matching the ord() checks used above
return 8 - int(noteData[len(noteData)-1])
else:
return None
def get_song_name(noktune):
rttlParts = noktune.split(':') # Split the RTTL song data into it's 3 core parts
return rttlParts[0] # Song Name
def get_duration(noktune):
rttlParts = noktune.split(':') # Split the RTTL song data into it's 3 core parts
dobVals = (rttlParts[1]).split(',')
defaultDur = int(dobVals[0].split('=')[1])
return defaultDur
def get_octave(noktune):
rttlParts = noktune.split(':') # Split the RTTL song data into it's 3 core parts
dobVals = (rttlParts[1]).split(',')
    defaultOct = int(dobVals[1].split('=')[1])
return defaultOct
def get_bpm(noktune):
rttlParts = noktune.split(':') # Split the RTTL song data into it's 3 core parts
dobVals = (rttlParts[1]).split(',')
defaultBeats = int(dobVals[2].split('=')[1])
return defaultBeats
def play_noktune(noktune,vol):
global noteFreq
tune=[]
rttlParts = noktune.split(':') # Split the RTTL song data into it's 3 core parts
defaultDur, defaultOct, defaultBeats = dob(rttlParts[1]) # Get default Duration, Octave, and Beats Per Minute
rttlNotes = rttlParts[2].split(',') # Split all the note data into a list
for note in rttlNotes: # Iterate through the note list
note = note.strip() # Strip out any possible pre or post spaces in the note data
durVal = durSplit(note) # Determine the per note duration if not default
if durVal == 0: # If there is no per note duration then use default for that note
durVal = defaultDur
duration = 60000 / defaultBeats / durVal * 3 # Calculate the proper duration based on Beats Per Minute and Duration Value
noteStr, hasDot = noteSplit(note) # Get note for dictionary lookup and check if the note has a dot
nFreq = noteFreq[noteStr] # Look up note frequency from the dictionary
if hasDot == True: # if it has a dot calculate the new duration
            duration = duration * 3 / 2 # dotted note is 1.5x duration (plain "3 / 2" truncates to 1 in Python 2)
octave = noteOctave(note) # Determine if there is per note octave change
if octave != None: # if so calculate the new octave frequency
nFreq /= octave
else: # else use the default octave frequency
nFreq /= defaultOct
if nFreq == 0: # if nFreq is 0 then it's a pause note so pause for the required time
e32.ao_sleep(float(duration / 1000.0))
else: # else play the note from the song
tune.append((nFreq,duration,vol))
pitchy.play_list(tune)
#play_noktune('Bond:d=4,o=5,b=50:32p,16c#6,32d#6,32d#6,16d#6,8d#6,16c#6,16c#6,16c#6,16c#6,32e6,32e6,16e6,8e6,16d#6,16d#6,16d#6,16c#6,32d#6,32d#6,16d#6,8d#6,16c#6,16c#6,16c#6,16c#6,32e6,32e6,16e6,8e6,16d#6,16d6,16c#6,16c#7,c.7,16g#6,16f#6,g#.6',3)
|
[
"gaurav.ssnl@gmail.com"
] |
gaurav.ssnl@gmail.com
|
d6a101b306dc31549d207905b650b709fd7cd2d5
|
48eeb03decf7fa135368c5a5570186c31f5c85db
|
/dbmanage/mongodb/mongo.py
|
5405ce9ec0320cd6e8e7827a7e0359020455ad07
|
[
"Apache-2.0"
] |
permissive
|
zhonghua001/sbdb
|
d5bb86cb5e4075a8f4dbf0d37c3fa72dd3a05a00
|
40d02767ad58e850bb1df48f87e280424779d30f
|
refs/heads/master
| 2021-01-24T03:52:58.751571
| 2018-02-26T03:51:12
| 2018-02-26T03:51:12
| 122,909,859
| 1
| 1
|
Apache-2.0
| 2020-07-22T02:38:13
| 2018-02-26T03:50:57
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 8,096
|
py
|
#coding=UTF-8
import MySQLdb,sys,string,time,datetime,uuid,pymongo,json
# from django.contrib.auth.models import User
from accounts.models import UserInfo
from dbmanage.myapp.models import Db_name,Db_account,Db_instance
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from dbmanage.myapp.include.encrypt import prpcrypt
public_user = settings.PUBLIC_USER
export_limit = int(settings.EXPORT_LIMIT)
def get_mongodb_list(username,tag='tag',search=''):
dbtype='mongodb'
host_list = []
if len(search) ==0:
if (tag=='tag'):
a = UserInfo.objects.get(username=username)
            # If the db has no account with role 'read' or 'all', it is not shown in the dropdown menu
for row in a.db_name_set.all().order_by("dbtag"):
if row.db_account_set.all().filter(role__in=['read','all']):
if row.instance.all().filter(role__in=['read','all']).filter(db_type=dbtype):
host_list.append(row.dbtag)
elif (tag=='log'):
for row in Db_name.objects.values('dbtag').distinct().order_by("dbtag"):
host_list.append(row['dbtag'])
elif (tag=='exec'):
a = UserInfo.objects.get(username=username)
            # If the db has no account with role 'write' or 'all', it is not shown in the dropdown menu
for row in a.db_name_set.all().order_by("dbtag"):
if row.db_account_set.all().filter(role__in=['write','all']):
                    # Exclude read-only instances
if row.instance.all().filter(role__in=['write','all']).filter(db_type=dbtype):
host_list.append(row.dbtag)
elif len(search) > 0:
if (tag=='tag'):
a = UserInfo.objects.get(username=username)
            # If the db has no account with role 'read' or 'all', it is not shown in the dropdown menu
for row in a.db_name_set.filter(dbname__contains=search).order_by("dbtag"):
if row.db_account_set.all().filter(role__in=['read','all']):
if row.instance.all().filter(role__in=['read','all']).filter(db_type=dbtype):
host_list.append(row.dbtag)
elif (tag=='log'):
for row in Db_name.objects.values('dbtag').distinct().order_by("dbtag"):
host_list.append(row['dbtag'])
elif (tag=='exec'):
a = UserInfo.objects.get(username=username)
            # If the db has no account with role 'write' or 'all', it is not shown in the dropdown menu
for row in a.db_name_set.filter(dbname__contains=search).order_by("dbtag"):
if row.db_account_set.all().filter(role__in=['write','all']):
                    # Exclude read-only instances
if row.instance.all().filter(role__in=['write','all']).filter(db_type=dbtype):
host_list.append(row.dbtag)
return host_list
def get_mongo_coninfo(hosttag,useraccount):
a = Db_name.objects.filter(dbtag=hosttag)[0]
# a = Db_name.objects.get(dbtag=hosttag)
tar_dbname = a.dbname
try:
if a.instance.all().filter(role='read')[0]:
tar_host = a.instance.all().filter(role='read')[0].ip
tar_port = a.instance.all().filter(role='read')[0].port
        # If no role='read' instance is configured, fall back to the first role='all' instance found
except Exception, e:
tar_host = a.instance.filter(role='all')[0].ip
tar_port = a.instance.filter(role='all')[0].port
# tar_host = a.instance.all()[0].ip
# tar_port = a.instance.all()[0].port
for i in a.db_account_set.all():
if i.role != 'write' and i.role != 'admin':
# find the specified account for the user
if i.account.all().filter(username=useraccount):
tar_username = i.user
tar_passwd = i.passwd
break
# not find specified account for the user ,specified the public account to the user
if not vars().has_key('tar_username'):
for i in a.db_account_set.all():
if i.role != 'write' and i.role != 'admin':
# find the specified account for the user
if i.account.all().filter(username=public_user):
tar_username = i.user
tar_passwd = i.passwd
break
pc = prpcrypt()
return tar_host,tar_port,tar_username,pc.decrypt(tar_passwd),tar_dbname
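# Hypothetical usage sketch (the username is an example; 'mongodb-easemob' is the tag
# used in __main__ below, everything else mirrors the callers in this file):
#   host, port, user, passwd, dbname = get_mongo_coninfo('mongodb-easemob', 'someuser')
#   client = pymongo.MongoClient(host, int(port))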
def get_db_info(hosttag,useraccount):
tar_host, tar_port, tar_username, tar_passwd, tar_dbname = get_mongo_coninfo(hosttag, useraccount)
connect = pymongo.MongoClient(tar_host, int(tar_port))
db = connect[tar_dbname]
try:
db.authenticate(tar_username, tar_passwd)
except Exception, e:
pass
results = db.command({'dbstats': 1})
return results
def get_tb_info(hosttag,tbname,useraccount):
tar_host, tar_port, tar_username, tar_passwd, tar_dbname = get_mongo_coninfo(hosttag, useraccount)
connect = pymongo.MongoClient(tar_host, int(tar_port))
db = connect[tar_dbname]
try:
db.authenticate(tar_username, tar_passwd)
except Exception, e:
pass
results = db.command({'collstats': tbname})
return results
def get_tbindex_info(hosttag,tbname,useraccount):
tar_host, tar_port, tar_username, tar_passwd, tar_dbname = get_mongo_coninfo(hosttag, useraccount)
connect = pymongo.MongoClient(tar_host, int(tar_port))
db = connect[tar_dbname]
try:
db.authenticate(tar_username, tar_passwd)
except Exception, e:
pass
collection = db[tbname]
results = collection.index_information()
return results
def get_mongo_collection(hosttag,useraccount):
try:
tar_host, tar_port, tar_username, tar_passwd, tar_dbname = get_mongo_coninfo(hosttag, useraccount)
        # Fetch the remaining connection info here based on the table name
connect = pymongo.MongoClient(tar_host, int(tar_port))
db = connect[tar_dbname]
try:
db.authenticate(tar_username, tar_passwd)
except Exception, e:
pass
results = db.collection_names()
except Exception, e:
results,col = ([str(e)],''),['error']
return results
def get_mongo_data(b,hosttag,tbname,useraccount):
try:
num = int(UserInfo.objects.get(username=useraccount).user_profile.export_limit)
except Exception, e:
num = export_limit
try:
tar_host, tar_port, tar_username, tar_passwd, tar_dbname = get_mongo_coninfo(hosttag, useraccount)
        # Fetch the remaining connection info here based on the table name
connect = pymongo.MongoClient(tar_host,int(tar_port))
db=connect[tar_dbname]
try:
db.authenticate(tar_username,tar_passwd)
except Exception,e:
pass
#tablename = tablename
collection = db[tbname]
#a = '''{'currTime': 1477371861706}'''
resulta = collection.find(eval(b),{"_id":0}).limit(num)
# resulta = collection.find().limit(20)
        #results = db.collection_names() # get all collection names
results = []
for recordjson in resulta:
#resultdict = {}
#for k,v in recordjson:
# resultdict[k] = v.encode('gb18030') #["ObjectId('580ee6e6f3de9821b20e57db') is not JSON serializable"]
results.append(json.dumps(recordjson,ensure_ascii=False,cls=DjangoJSONEncoder))
#results.append(recordjson)
except Exception, e:
results = (['error'],'')
return results
if __name__ == '__main__':
#x="insert /*sdfs*/into mysql.test ;truncate table mysql.db;rename mysql.db ;rename asdf;delete from `msql`.sa set ;delete ignore from t1 mysql.test values sdasdf;insert into ysql.user values()"
# print x
#x=" /*! */; select /**/ #asdfasdf; \nfrom mysql_replication_history;"
#x = " insert into item_infor (id,name) values(7,'t\\'e\"st');drop t * from test;"
print get_mongo_data('{"_class" : "com.mongodb.BasicDBObject"}','mongodb-easemob','message','root')
|
[
"123"
] |
123
|
ee61071610eab8a0aed3f149a7ea92a7aed2bdd9
|
86c30560810feb9ba6a70fa7e5e1c46d039a4bb2
|
/Ass/3/gameDemo/gd_fucks/alien.py
|
9c9c004fefaba806131bd4d7a66ad1b63244ee67
|
[] |
no_license
|
zubairabid/ITWS2
|
3f0608835e79725f88fd1658b40a65f1a4a2ef47
|
4703aba0ada3b0a0a165155ba1177420a110decf
|
refs/heads/master
| 2021-03-27T12:47:17.522346
| 2018-04-15T00:11:43
| 2018-04-15T00:11:43
| 116,122,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
import globalset as gs
class Alien:
def __init__(self, x, y):
self.txt = gs.alien
self.x = x
self.y = y
self.life = gs.ALIEN_LIFE
def update(self, sess):
self.life -= gs.CLOCK_CYCLE
if(self.life <= 0):
self.remove(sess)
def remove(self, sess):
if(self in sess.alist):
# c = 0
# for t in sess.alist:
# if(self == t):
# sess.alist.remove(c)
# break
# c += 1
sess.alist.remove(self)
sess.screen[self.x][self.y] = None
|
[
"zubairabid1999+github@gmail.com"
] |
zubairabid1999+github@gmail.com
|
a5a8886cd1b54a2f799c66975c4821c04172df57
|
6533c515c936d999c4bd8192d03d01c56172940c
|
/wlauto/commands/run.py
|
c5432efe274167162fcfca1b1422e0f8c06fc751
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
caogao/workload-automation
|
330558c1a51b42a6e9e3291333f7c0653975c617
|
44a49db04da3224003f16bf82db0dc309422b710
|
refs/heads/master
| 2021-01-15T11:07:01.869199
| 2016-04-15T15:39:24
| 2016-04-15T15:39:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,053
|
py
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import shutil
import wlauto
from wlauto import Command, settings
from wlauto.exceptions import ConfigError
from wlauto.core.agenda import Agenda
from wlauto.core.execution import Executor
from wlauto.utils.log import add_log_file
class RunCommand(Command):
name = 'run'
description = 'Execute automated workloads on a remote device and process the resulting output.'
def initialize(self, context):
self.parser.add_argument('agenda', metavar='AGENDA',
help="""
Agenda for this workload automation run. This defines which
workloads will be executed, how many times, with which
tunables, etc. See example agendas in {} for an example of
how this file should be structured.
""".format(os.path.dirname(wlauto.__file__)))
self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
help="""
Specify a directory where the output will be generated. If
the directory already exists, the script will abort unless -f
option (see below) is used, in which case the contents of the
directory will be overwritten. If this option is not specified,
then {} will be used instead.
""".format(settings.output_directory))
self.parser.add_argument('-f', '--force', action='store_true',
help="""
Overwrite output directory if it exists. By default, the script
will abort in this situation to prevent accidental data loss.
""")
self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
help="""
Specify a workload spec ID from an agenda to run. If this is
specified, only that particular spec will be run, and other
workloads in the agenda will be ignored. This option may be
used to specify multiple IDs.
""")
self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
metavar='INSTRUMENT', help="""
Specify an instrument to disable from the command line. This
                                 is equivalent to adding "~{metavar}" to the instrumentation list in
the agenda. This can be used to temporarily disable a troublesome
instrument for a particular run without introducing permanent
change to the config (which one might then forget to revert).
This option may be specified multiple times.
""")
def execute(self, args): # NOQA
self.set_up_output_directory(args)
add_log_file(settings.log_file)
if os.path.isfile(args.agenda):
agenda = Agenda(args.agenda)
settings.agenda = args.agenda
shutil.copy(args.agenda, settings.meta_directory)
elif '.' in args.agenda or os.sep in args.agenda:
raise ConfigError('Agenda "{}" does not exist.'.format(args.agenda))
else:
self.logger.debug('{} is not a file; assuming workload name.'.format(args.agenda))
agenda = Agenda()
agenda.add_workload_entry(args.agenda)
if args.instruments_to_disable:
if 'instrumentation' not in agenda.config:
agenda.config['instrumentation'] = []
for itd in args.instruments_to_disable:
self.logger.debug('Updating agenda to disable {}'.format(itd))
agenda.config['instrumentation'].append('~{}'.format(itd))
basename = 'config_'
for file_number, path in enumerate(settings.get_config_paths(), 1):
file_ext = os.path.splitext(path)[1]
shutil.copy(path, os.path.join(settings.meta_directory,
basename + str(file_number) + file_ext))
executor = Executor()
executor.execute(agenda, selectors={'ids': args.only_run_ids})
def set_up_output_directory(self, args):
if args.output_directory:
settings.output_directory = args.output_directory
self.logger.debug('Using output directory: {}'.format(settings.output_directory))
if os.path.exists(settings.output_directory):
if args.force:
self.logger.info('Removing existing output directory.')
shutil.rmtree(settings.output_directory)
else:
self.logger.error('Output directory {} exists.'.format(settings.output_directory))
self.logger.error('Please specify another location, or use -f option to overwrite.\n')
sys.exit(1)
self.logger.info('Creating output directory.')
os.makedirs(settings.output_directory)
os.makedirs(settings.meta_directory)
|
[
"sergei.trofimov@arm.com"
] |
sergei.trofimov@arm.com
|
c14936fab65953dbc7436882b80b1d347adc2081
|
62e4a186dc4b6294748ea6f1b6432219b5acf5ad
|
/backend/home/migrations/0001_load_initial_data.py
|
46847f2ef86805729f40bbd28060eabdfb9c3bef
|
[] |
no_license
|
crowdbotics-apps/tester-app-31668
|
60413ed775d07c8f958bb30e5398b1941722a8bb
|
d3e5ba380ee2041c07e73b7e8b45dbe3c9955a03
|
refs/heads/master
| 2023-09-01T07:48:38.823415
| 2021-10-26T02:03:42
| 2021-10-26T02:03:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "tester-app-31668.botics.co"
site_params = {
"name": "Tester App",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
21df8591a5c6f1ead6ded6ad17b51db1a42ef0ca
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=2.0_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=65/params.py
|
a4991794aa98f582d2bdb2947fdbb119be76f15a
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '2.016476',
'max_util': '2.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.8',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 65,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
6dd1cc97f758079d3f409b647478f0d62e72da99
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_joey.py
|
bc33ba9a42bf180b251f994f39bb062ec82865e5
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
# class header
class _JOEY():
def __init__(self,):
self.name = "JOEY"
self.definitions = [u'a young kangaroo']
self.parents = []
        self.children = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0eef22e43e4999b7e13da85bee5a3719f09f6108
|
13f4a06cd439f579e34bf38406a9d5647fe7a0f3
|
/nn_ns/Bijection/BijectiveNumeration.py
|
d21afc99a4a5f19a282db7981ccf2d6d2a195f95
|
[] |
no_license
|
edt-yxz-zzd/python3_src
|
43d6c2a8ef2a618f750b59e207a2806132076526
|
41f3a506feffb5f33d4559e5b69717d9bb6303c9
|
refs/heads/master
| 2023-05-12T01:46:28.198286
| 2023-05-01T13:46:32
| 2023-05-01T13:46:32
| 143,530,977
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,325
|
py
|
'''
https://en.wikipedia.org/wiki/Bijective_numeration
Radix <- PInt
BiDigit = [1..Radix]
# compare Digit = [0..Radix-1]
# little-endian
bidigitsLE2uint :: [BiDigit] -> UInt
bidigitsLE2uint ds = f ds 1 where
f [] weight = 0
f (h:ts) weight = h*weight + f ts (weight*Radix)
bidigitsLE2uint = f where
f [] = 0
f (h:ts) = h + (f ts) * Radix
bidivmod :: UInt -> PInt -> (UInt, PInt)
bidivmod n d = (q, r) where
# q = ceil(n/d) - 1 = floor((n+d-1)/d) - 1 = floor((n-1)/d)
q = (n-1)//d
r = n - q*d
uint2bidigitsLE :: UInt -> [BiDigit]
uint2bidigitsLE = f where
f 0 = []
f n = r : f q where
(q, r) = bidivmod n Radix
'''
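# Worked example (computed by hand from the definitions above): with Radix = 2 the
# digits are {1, 2}, and 5 = 1 + 2*2, so 5 <-> [1, 2] little-endian ("21" big-endian).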
__all__ = '''
bidigits2uint__little_endian
uint2bidigits__little_endian
'''.split()
from .ArbitraryRadixNumber import \
number2iter_arbitrary_radix_reprLE, arbitrary_radix_reprBE2number
def bidivmod(n, d):
#ssert n >= 0
#ssert d >= 1
q, r = divmod(n-1, d)
r += 1
return q, r
    # equivalent direct computation (unreachable; kept as a reference):
    # q = (n-1)//d
    # r = n - q*d
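    # e.g. bidivmod(10, 5) == (1, 5): the "digit" for an exact multiple is the
    # radix itself (5), not 0, which is what makes the numeration bijective.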
def uint2iter_bidigitsLE(radix, u):
assert u >= 0
assert radix >= 1 # need not 2
return number2iter_arbitrary_radix_reprLE(u, radix, 0, bidivmod)
''' bug: should comment below code to disable 'yield'!!
while u > 0:
u, r = bidivmod(u, radix)
yield r
'''
def uint2bidigitsLE(radix, u):
return tuple(uint2iter_bidigitsLE(radix, u))
def bidigitsLE2uint(radix, bidigits):
# little-endian
assert all(1<=d<=radix for d in bidigits)
return arbitrary_radix_reprBE2number(reversed(bidigits), radix, 0)
    # naive reference implementation (unreachable; kept as documentation):
    # u = 0
    # for d in reversed(bidigits):
    #     u *= radix
    #     u += d
    # return u
bidigits2uint__little_endian = bidigitsLE2uint
uint2bidigits__little_endian = uint2bidigitsLE
def test():
for radix in range(1, 5):
for u in range(100):
bs = uint2bidigitsLE(radix, u)
u_ = bidigitsLE2uint(radix, bs)
#rint(u, bs, u_)
assert u == u_
from itertools import product
for radix in range(1, 5):
for L in range(5):
for bs in product(range(1, radix+1), repeat=L):
u = bidigitsLE2uint(radix, bs)
bs_ = uint2bidigitsLE(radix, u)
assert bs == bs_
if __name__ == '__main__':
print('test BijectiveNumeration.py')
test()
|
[
"wuming_zher@zoho.com.cn"
] |
wuming_zher@zoho.com.cn
|
b10b08af4d84840fac55aead8c737624c5df79f2
|
5285d7071a74d8b56ad45a2a5b1f7d49ee672916
|
/postexp/module.py
|
d6ac8cba41b03bee27c3afab06044373853111ca
|
[] |
no_license
|
krishpranav/postexp
|
9d4e1bf3be72ee27418449b4bc824ebdf9be1b44
|
348d7813a44585c9d34dc1d030380af0bd90a8f3
|
refs/heads/master
| 2023-04-20T04:33:32.173926
| 2021-05-06T05:23:02
| 2021-05-06T05:23:02
| 364,470,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
#!/usr/bin/env python3
# imports
import os
import pkg_resources
from importlib import import_module
INDEX_FILE = 'modindex.txt'
client_commands = {}
server_commands = {}
def client_handler(cmd):
def decorate(func):
client_commands[cmd] = func
return decorate
def server_handler(cmd):
def decorate(func):
server_commands[cmd] = func
return decorate
def load_modules():
    # resource_string returns bytes under Python 3, so decode before string comparisons
    for fname in pkg_resources.resource_string(__name__, INDEX_FILE).decode().split():
if fname.endswith('.py'):
mod = os.path.splitext(fname)[0]
if mod == '__init__':
continue
elif mod in server_commands.keys():
raise Exception('duplicate module detected: {}'.format(mod))
import_module('modules.' + mod)
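# modindex.txt is assumed to be a newline-separated list of module filenames,
# e.g. (hypothetical entries, not taken from this repo):
#   shell.py
#   upload.py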
|
[
"krisna.pranav@gmail.com"
] |
krisna.pranav@gmail.com
|
85b2e627fa812d32731797a4a20c53bbac3ec85c
|
d966694f2eb0fe09582716cf6ce60dba6f5370b8
|
/B站/twoWEB表单/Flask_WTF_demo2.py
|
6d57de3e3a23b889dded1637680484b45e2d9c68
|
[] |
no_license
|
lijianmin01/Flask_study
|
217f1e9a7cd3511407144b6daa5cf13e962a6307
|
98639f471a88d5349a38564512f35c660057b9de
|
refs/heads/main
| 2023-01-18T16:29:27.143408
| 2020-11-26T10:22:28
| 2020-11-26T10:22:28
| 315,005,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
from flask import Flask,render_template,request
app = Flask(__name__)
@app.route("/",methods=['POST','GET'])
def index():
message = None
if request.method == 'POST':
username = request.form.get("username")
password = request.form.get("password")
password2 = request.form.get("password2")
        # 3. Check that all fields are filled in and that the two passwords match
        if not all([username, password, password2]):
            print("Incomplete parameters")
            message = "Incomplete parameters"
        elif password2 != password:
            message = "The two passwords do not match"
else:
message = "success"
return render_template('index.html',message=message)
if __name__ == '__main__':
app.run(debug=True)
|
[
"lijianmin01@126.com"
] |
lijianmin01@126.com
|
2bbc03087bb2db7a50467af5b4a2381fcdc265ce
|
093b9569be9d1c4e5daf92efbebc38f680917b2d
|
/.history/base/views_20210829091127.py
|
806d5e71eb0dda642f9ec037408d383e22ca9a91
|
[] |
no_license
|
Justin-Panagos/todoList
|
95b1e97ff71af1b0be58e7f8937d726a687cea4d
|
10539219b59fcea00f8b19a406db3d4c3f4d289e
|
refs/heads/master
| 2023-08-04T13:27:13.309769
| 2021-08-29T14:06:43
| 2021-08-29T14:06:43
| 400,827,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView
from django.urls import reverse_lazy
from django.contrib.auth.views import LoginView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login
from .models import Task
# Login view
class CustomLoginView(LoginView):
template_name = 'base/login.html'
fields = '__all__'
redirect_authenticated_user = True
def get_success_url(self):
return reverse_lazy('tasks')
# Registration view
class RegisterPage(FormView):
    template_name = 'base/register.html'
    form_class = UserCreationForm
    redirect_authenticated_user = True
    success_url = reverse_lazy('tasks')

    def form_valid(self, form):  # FormView calls form_valid; the original form_validate would never run
        user = form.save()
        if user is not None:
            login(self.request, user)
        return super(RegisterPage, self).form_valid(form)
class TaskList( LoginRequiredMixin, ListView):
model = Task
context_object_name = 'tasks'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['tasks'] = context['tasks'].filter(user=self.request.user)
context['count'] = context['tasks'].filter(complete=False).count()
return context
class TaskDetail(LoginRequiredMixin, DetailView):
model = Task
context_object_name = 'task'
template_name = 'base/task.html'
class TaskCreate(LoginRequiredMixin, CreateView):
model = Task
fields = ['title','description','complete']
success_url = reverse_lazy('tasks')
def form_valid(self, form):
form.instance.user = self.request.user
return super(TaskCreate, self).form_valid(form)
class TaskUpdate( LoginRequiredMixin, UpdateView):
model = Task
fields = ['title','description','complete']
success_url = reverse_lazy('tasks')
class TaskDelete(LoginRequiredMixin, DeleteView):
model = Task
context_object_name = 'task'
success_url = reverse_lazy('tasks')
|
[
"justpanagos@gmail.com"
] |
justpanagos@gmail.com
|
01c5946ab4cfc183f51a78e89ad9061896b00355
|
bc531455ed161db04aedfa79f6daae32efefa321
|
/benchmarks/datasets/openml_sylvine/info.py
|
7f5014ae810696f8e166ade6a72d96e4a26c81ab
|
[] |
no_license
|
mindsdb/benchmarks
|
b46f46f59047a2d3f6a0624addb3c281471d6092
|
a122a85bb0124da8a469f8ef8baafdf9a70bfb5a
|
refs/heads/main
| 2023-08-30T19:33:17.340246
| 2021-11-01T23:09:54
| 2021-11-01T23:09:54
| 302,911,061
| 5
| 23
| null | 2021-10-30T19:25:48
| 2020-10-10T13:44:16
|
Python
|
UTF-8
|
Python
| false
| false
| 467
|
py
|
from benchmarks.helpers.accuracy import balanced_accuracy_score, roc_auc
from benchmarks.datasets.dataset import DatasetInterface
class Dataset(DatasetInterface):
tags = ['classification']
learn_kwargs = {}
num_folds = 5
accuracy_functions = [balanced_accuracy_score, roc_auc]
file = 'data.csv'
active = True
target = 'class'
source = 'https://www.openml.org/d/41146'
license = 'Public Domain Mark 1.0'
is_open_license = True
|
[
"george@cerebralab.com"
] |
george@cerebralab.com
|
f9a367342ececac14729f37a9b60ab04e704c21a
|
225a9d69ad0d1c4741afc42e17cb15e188a88959
|
/page_counter/wsgi.py
|
3473ab2f995ea3fc69f0d6135e0e9fb26e792f02
|
[] |
no_license
|
priyankaonly1/Page_Counter_project
|
445bbef67739af299273433b3094775fd94a1aef
|
f2c8b6f24ca0b04fba38dec98329f53ffe7053d6
|
refs/heads/main
| 2023-06-02T03:50:40.374197
| 2021-06-19T10:45:57
| 2021-06-19T10:45:57
| 378,388,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for page_counter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'page_counter.settings')
application = get_wsgi_application()
|
[
"priyankabiswasonly1@gmail.com"
] |
priyankabiswasonly1@gmail.com
|
1759c3db5b289a6c16e38b0aee69020ea0b3073b
|
8a452b71e3942d762fc2e86e49e72eac951b7eba
|
/leetcode/editor/en/[1827]Minimum Operations to Make the Array Increasing.py
|
b4069f64857e87c713a853fd9375da69f74b4076
|
[] |
no_license
|
tainenko/Leetcode2019
|
7bea3a6545f97c678a176b93d6622f1f87e0f0df
|
8595b04cf5a024c2cd8a97f750d890a818568401
|
refs/heads/master
| 2023-08-02T18:10:59.542292
| 2023-08-02T17:25:49
| 2023-08-02T17:25:49
| 178,761,023
| 5
| 0
| null | 2019-08-27T10:59:12
| 2019-04-01T01:04:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
# You are given an integer array nums (0-indexed). In one operation, you can
# choose an element of the array and increment it by 1.
#
#
# For example, if nums = [1,2,3], you can choose to increment nums[1] to make
# nums = [1,3,3].
#
#
# Return the minimum number of operations needed to make nums strictly
# increasing.
#
# An array nums is strictly increasing if nums[i] < nums[i+1] for all 0 <= i <
# nums.length - 1. An array of length 1 is trivially strictly increasing.
#
#
# Example 1:
#
#
# Input: nums = [1,1,1]
# Output: 3
# Explanation: You can do the following operations:
# 1) Increment nums[2], so nums becomes [1,1,2].
# 2) Increment nums[1], so nums becomes [1,2,2].
# 3) Increment nums[2], so nums becomes [1,2,3].
#
#
# Example 2:
#
#
# Input: nums = [1,5,2,4,1]
# Output: 14
#
#
# Example 3:
#
#
# Input: nums = [8]
# Output: 0
#
#
#
# Constraints:
#
#
# 1 <= nums.length <= 5000
# 1 <= nums[i] <= 10⁴
#
#
# Related Topics Array Greedy 👍 747 👎 34
# leetcode submit region begin(Prohibit modification and deletion)
from typing import List  # required for the List[int] annotation below


class Solution:
def minOperations(self, nums: List[int]) -> int:
if len(nums) <= 1:
return 0
total = 0
prev = nums[0]
for num in nums[1:]:
if num <= prev:
total += prev - num + 1
prev += 1
else:
prev = num
return total
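# Hand trace of Example 2, nums = [1,5,2,4,1], using the greedy loop above:
#   prev=1 -> 5 (ok, prev=5) -> 2 (+4, prev=6) -> 4 (+3, prev=7) -> 1 (+7, prev=8)
#   total = 4 + 3 + 7 = 14, matching the expected output.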
# leetcode submit region end(Prohibit modification and deletion)
|
[
"31752048+tainenko@users.noreply.github.com"
] |
31752048+tainenko@users.noreply.github.com
|
459542f9961620ec16485e394cacb1d4532de3fb
|
6930e9d3372e83cf43a47ae8ad165f83a218aee2
|
/capture/noworkflow/now/models/__init__.py
|
92802914a0447e80de232d80944205dcce8db60e
|
[
"MIT"
] |
permissive
|
hugobowne/noworkflow
|
02ab47a8b3377ee56f1e7c4552a8dbcb3d15e5f0
|
333cbe274348428f1a9514fe81406f8416036845
|
refs/heads/master
| 2021-01-17T20:27:16.524245
| 2015-11-18T23:53:28
| 2015-11-18T23:53:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
# Copyright (c) 2015 Universidade Federal Fluminense (UFF)
# Copyright (c) 2015 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from .model import Model
from .history import History
from .trial import Trial
from .diff import Diff
from .trial_prolog import TrialProlog
|
[
"joaofelipenp@gmail.com"
] |
joaofelipenp@gmail.com
|
6aba572d1cba098d672c298e067d0dfc92d91b91
|
d2fc4d45b115fb861097657d00b3c5cb08e8a3ad
|
/scenarios/customer_create_debit/executable.py
|
c7e6fcc23d760ad3dc7b1ac2dcbd3e0eb70c15d5
|
[] |
no_license
|
jess010/balanced-python
|
81b39f0e9d3ce52d60f2453b8c98e77f07ee3acb
|
b7a6bf0430ad0299d96de15ea97d3d4ccfb4c958
|
refs/heads/master
| 2020-12-25T16:13:35.626111
| 2013-09-20T00:14:58
| 2013-09-20T00:14:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
import balanced
balanced.configure('b5de51921b2d11e389c4026ba7cac9da')
customer = balanced.Customer.find('/v1/customers/AC6M5tIyndBqrv4fEdubPUhg')
customer.debit(amount=5000)
|
[
"ben@unfiniti.com"
] |
ben@unfiniti.com
|
9054213d9c841a3b76c2f2119fa94feb4f9e689d
|
60530f3e1d22fcb3b51be0b9f482a912c8203bb0
|
/Python_Workbook/H01.py
|
b9b81c4afc7f2d00d7a879db0647b1ec34ce4b0f
|
[] |
no_license
|
cinxdy/Python_practice
|
1afb2aca1c92d16d98459407ae02ca2ed7f7832c
|
8a2642b51c6ad73840dae964b1a55cbb53f7b9f7
|
refs/heads/master
| 2022-05-09T00:10:15.849881
| 2022-04-03T09:38:53
| 2022-04-03T09:38:53
| 180,339,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
from random import *
count = 0
answer = randint(1,100)
while 1:
    number_try = int(input("Try guess number between 1 and 100 "))
    count += 1
    if number_try > answer:
        print("answer is smaller number")
    elif number_try < answer:
        print("answer is bigger number")
    else:
        print("You got it! try count : %d" % count)
        break
|
[
"cinxdys2@gmail.com"
] |
cinxdys2@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.